Merge "msm: vidc: Add support for Mpeg-2 open-gop handling"
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
index 70fea73..e777094 100644
--- a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -6,22 +6,32 @@
- reg: offset and length of the register regions(s) for the device.
- reg-names: a list of strings that map in order to the list of regs.
-- <supply-name>-supply: phandle to the regulator device tree node.
-- <compatible-name>-supply-names: a list of strings that map in order
+- hpd-5v-supply: phandle to the 5V regulator device tree node.
+- core-vdda-supply: phandle to the HDMI vdda regulator device tree node.
+- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
+- qcom,hdmi-tx-supply-names: a list of strings that map in order
to the list of supplies.
-- <<compatible-name>-supply-type: a type of supply(ies) mentioned above.
+- qcom,hdmi-tx-supply-type: a type of supply(ies) mentioned above.
0 = supply with controlled output
1 = supply without controlled output. i.e. voltage switch
-- <compatible-name>-min-voltage-level: specifies minimum voltage level
+- qcom,hdmi-tx-min-voltage-level: specifies minimum voltage level
of supply(ies) mentioned above.
-- <compatible-name>-max-voltage-level: specifies maximum voltage level
+- qcom,hdmi-tx-max-voltage-level: specifies maximum voltage level
of supply(ies) mentioned above.
-- <compatible-name>-op-mode: specifies optimum operating mode of
+- qcom,hdmi-tx-op-mode: specifies optimum operating mode of
supply(ies) mentioned above.
-- gpios: specifies gpios assigned for the device.
-- <compatible-name>-gpio-names: a list of strings that map in order to
- the list of gpios
+- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
+- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
+- qcom,hdmi-tx-ddc-data: gpio for ddc data line.
+- qcom,hdmi-tx-hpd: gpio required for HDMI hot-plug detect.
+
+Optional properties:
+- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between
+ docking station, type A, and liquid device, type D, ports. Required
+ property for liquid devices.
+- qcom,hdmi-tx-mux-en: gpio required to enable mux for HDMI output
+ on liquid devices. Required property for liquid devices.
Example:
qcom,hdmi_tx@fd922100 {
@@ -41,10 +51,8 @@
qcom,hdmi-tx-max-voltage-level = <0 1800000 1800000>;
qcom,hdmi-tx-op-mode = <0 1800000 0>;
- gpios = <&msmgpio 31 0>,
- <&msmgpio 32 0>,
- <&msmgpio 33 0>,
- <&msmgpio 34 0>;
- qcom,hdmi-tx-gpio-names =
- "cec-pin", "hpd-ddc-clk", "hpd-ddc-data", "hpd-pin";
+ qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+ qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+ qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+ qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
};
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 38b2721..9164647 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -5,7 +5,12 @@
Required properties:
- label: A string used as a descriptive name for the device.
- compatible: Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
-- reg: Specifies the base address and address size for this device.
+- reg: Specifies the register base address and size. The second interval
+ specifies the shader memory base address and size.
+- reg-names: Resource names used for the physical address of device registers
+ and shader memory. "kgsl_3d0_reg_memory" gives the physical address
+ and length of device registers while "kgsl_3d0_shader_memory" gives
+ physical address and length of device shader memory.
- interrupts: Interrupt mapping for GPU IRQ.
- interrupt-names: String property to describe the name of the interrupt.
- qcom,id: An integer used as an identification number for the device.
@@ -70,8 +75,9 @@
qcom,kgsl-3d0@fdb00000 {
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
- reg = <0xfdb00000 0x20000>;
- reg-names = "kgsl_3d0_reg_memory";
+ reg = <0xfdb00000 0x10000
+ 0xfdb20000 0x10000>;
+ reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_shader_memory";
interrupts = <0 33 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
index 33d5cc1..e458ea0 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -11,6 +11,7 @@
- compatible : should be "qcom,qpnp-iadc" for Current ADC driver.
- reg : offset and length of the PMIC Aribter register map.
- interrupts : The USR bank peripheral IADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set".
- qcom,adc-bit-resolution : Bit resolution of the ADC.
- qcom,adc-vdd-reference : Voltage reference used by the ADC.
- qcom,rsense : Internal rsense resistor used for current measurements.
@@ -84,6 +85,7 @@
compatible = "qcom,qpnp-iadc";
reg = <0x3200 0x100>;
interrupts = <0 0x36 0>;
+ interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
qcom,rsense = <1500>;
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index d7d3ec2..e23605c 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -11,6 +11,7 @@
- compatible : should be "qcom,qpnp-vadc" for Voltage ADC driver.
- reg : offset and length of the PMIC Aribter register map.
- interrupts : The USR bank peripheral VADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set".
- qcom,adc-bit-resolution : Bit resolution of the ADC.
- qcom,adc-vdd-reference : Voltage reference used by the ADC.
@@ -82,6 +83,7 @@
compatible = "qcom,qpnp-vadc";
reg = <0x3100 0x100>;
interrupts = <0x0 0x31 0x0>;
+ interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1800>;
diff --git a/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
new file mode 100644
index 0000000..ed45192
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
@@ -0,0 +1,26 @@
+* Silicon Image-8334 MHL Tx
+
+Required properties:
+- compatible: must be "qcom,mhl-sii8334"
+- reg: i2c slave address
+- mhl-intr-gpio: MHL interrupt gpio coming out of sii8334
+- mhl-pwr-gpio: MHL power gpio required for power rails
+- mhl-rst-gpio: MHL reset gpio going into sii8334 for toggling reset pin
+- <supply-name>-supply: phandle to the regulator device tree node.
+
+Example:
+ i2c@f9967000 {
+ sii8334@72 {
+ compatible = "qcom,mhl-sii8334";
+ reg = <0x72>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <82 0x8>;
+ mhl-intr-gpio = <&msmgpio 82 0>;
+ mhl-pwr-gpio = <&msmgpio 12 0>;
+ mhl-rst-gpio = <&pm8941_mpps 8 0>;
+ avcc_18-supply = <&pm8941_l24>;
+ avcc_12-supply = <&pm8941_l2>;
+ smps3a-supply = <&pm8941_s3>;
+ vdda-supply = <&pm8941_l12>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 556300d..802716c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -18,8 +18,7 @@
- qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
images and self-authentication is not desired;
<1> if the hardware requires self-authenticating images.
-- qcom,is_loadable: <0> if PIL should not load the modem image
- <1> if PIL is required to load the modem image
+- qcom,is-loadable: if PIL is required to load the modem image
Example:
qcom,mss@fc880000 {
@@ -34,7 +33,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..86c60e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,81 @@
+Qualcomm Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+ registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+ "bam-base" - string to identify the IPA BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+ "bam-irq" - string to identify the IPA BAM interrupt.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+-label: two labels are supported, a2-to-ipa and ipa-to-a2 which
+supply static configuration for A2-IPA connection.
+-qcom,src-bam-physical-address: The physical address of the source BAM
+-qcom,ipa-bam-mem-type: The memory type:
+ 0(Pipe memory), 1(Private memory), 2(System memory)
+-qcom,src-bam-pipe-index: Source pipe index
+-qcom,dst-bam-physical-address: The physical address of the
+ destination BAM
+-qcom,dst-bam-pipe-index: Destination pipe index
+-qcom,data-fifo-offset: Data fifo base offset
+-qcom,data-fifo-size: Data fifo size (bytes)
+-qcom,descriptor-fifo-offset: Descriptor fifo base offset
+-qcom,descriptor-fifo-size: Descriptor fifo size (bytes)
+
+Optional properties:
+-qcom,ipa-pipe-mem: Specifies the base physical address and the
+ size of the IPA pipe memory region.
+ Pipe memory is a feature which may be supported by the
+ target (HW platform). The Driver support using pipe
+ memory instead of system memory. In case this property
+ will not appear in the IPA DTS entry, the driver will
+ use system memory.
+
+Example:
+
+qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+ reg = <0xfd4c0000 0x26000>,
+ <0xfd4c4000 0x14818>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 252 0>,
+ <0 253 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index 5e7c42a..43033a8 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : Should be "qcom,qseecom"
+- reg : should contain memory region address reserved for loading secure apps.
- qcom, msm_bus,name: Should be "qseecom-noc"
- qcom, msm_bus,num_cases: Depends on the use cases for bus scaling
- qcom, msm_bus,num_paths: The paths for source and destination ports
@@ -10,6 +11,8 @@
Example:
qcom,qseecom@fe806000 {
compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
qcom,msm_bus,name = "qseecom-noc";
qcom,msm_bus,num_cases = <4>;
qcom,msm_bus,active_only = <0>;
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 2c74415..213da90 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -425,4 +425,71 @@
qcom,msm-mi2s-rx-lines = <2>;
qcom,msm-mi2s-tx-lines = <1>;
};
-};
\ No newline at end of file
+};
+
+* MSM9625 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,mdm9625-audio-taiko"
+- qcom,model : The user-visible name of this sound card.
+- qcom,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source.
+- qcom,taiko-mclk-clk-freq : Master clock value given to codec. Some WCD9XXX
+ codec can run at different mclk values. Mclk value can be 9.6MHz or 12.288MHz.
+ This element represents the value for MCLK provided to codec.
+
+Example:
+
+sound {
+ compatible = "qcom,mdm9625-audio-taiko";
+ qcom,model = "mdm9625-taiko-i2s-snd-card";
+
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "LDO_H", "MCLK",
+ "Ext Spk Bottom Pos", "LINEOUT1",
+ "Ext Spk Bottom Neg", "LINEOUT3",
+ "Ext Spk Top Pos", "LINEOUT2",
+ "Ext Spk Top Neg", "LINEOUT4",
+ "AMIC1", "MIC BIAS1 External",
+ "MIC BIAS1 External", "Handset Mic",
+ "AMIC2", "MIC BIAS2 External",
+ "MIC BIAS2 External", "Headset Mic",
+ "AMIC3", "MIC BIAS3 Internal1",
+ "MIC BIAS3 Internal1", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS1 Internal2",
+ "MIC BIAS1 Internal2", "ANCLeft Headset Mic",
+ "DMIC1", "MIC BIAS1 External",
+ "MIC BIAS1 External", "Digital Mic1",
+ "DMIC2", "MIC BIAS1 External",
+ "MIC BIAS1 External", "Digital Mic2",
+ "DMIC3", "MIC BIAS3 External",
+ "MIC BIAS3 External", "Digital Mic3",
+ "DMIC4", "MIC BIAS3 External",
+ "MIC BIAS3 External", "Digital Mic4",
+ "DMIC5", "MIC BIAS4 External",
+ "MIC BIAS4 External", "Digital Mic5",
+ "DMIC6", "MIC BIAS4 External",
+ "MIC BIAS4 External", "Digital Mic6";
+ qcom,taiko-mclk-clk-freq = <12288000>;
+};
+
+* msm-adsp-loader
+
+Required properties:
+ - compatible : "msm-adsp-loader"
+ - qcom,adsp-state:
+ It is possible that some MSM use PIL to load the ADSP image. While
+ other MSM may use SBL to load the ADSP image at boot. Audio APR needs
+ state of ADSP to register and enable APR to be used for sending commands
+ to ADSP. so adsp-state represents the state of ADSP to ADSP loader. Value
+ of 0 indicates ADSP loader needs to use PIL and value of 2 means ADSP
+ image is already loaded by SBL.
+
+Example:
+
+qcom,msm-adsp-loader {
+ compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <2>;
+};
diff --git a/Documentation/devicetree/bindings/sound/taiko_codec.txt b/Documentation/devicetree/bindings/sound/taiko_codec.txt
index 96e3a61..090d8db 100644
--- a/Documentation/devicetree/bindings/sound/taiko_codec.txt
+++ b/Documentation/devicetree/bindings/sound/taiko_codec.txt
@@ -22,12 +22,14 @@
- qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
- qcom,cdc-micbias-cfilt2-mv - cfilt2 output voltage in milli volts.
- qcom,cdc-micbias-cfilt3-mv - cfilt3 output voltage in milli volts.
- cfilt volatge can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
+ cfilt voltage can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
- qcom,cdc-micbias1-cfilt-sel = cfilt to use for micbias1 (should be from 1 to 3).
- qcom,cdc-micbias2-cfilt-sel = cfilt to use for micbias2 (should be from 1 to 3).
- qcom,cdc-micbias3-cfilt-sel = cfilt to use for micbias3 (should be from 1 to 3).
- qcom,cdc-micbias4-cfilt-sel = cfilt to use for micbias4 (should be from 1 to 3).
+ This value represents the connected CFILT to MIC Bias.
+
- qcom,cdc-micbias1-ext-cap: Boolean. Enable micbias 1 external capacitor mode.
- qcom,cdc-micbias2-ext-cap: Boolean. Enable micbias 2 external capacitor mode.
- qcom,cdc-micbias3-ext-cap: Boolean. Enable micbias 3 external capacitor mode.
@@ -88,3 +90,109 @@
qcom,cdc-slim-ifd = "taiko-slim-ifd";
qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 00 17 02];
};
+
+Wcd9xxx audio CODEC in I2C mode
+
+ - compatible = "qcom,wcd9xxx-i2c-device";
+ - reg: represents the slave address provided to the I2C driver.
+ - qcom,cdc-reset-gpio: gpio used for codec SOC reset.
+ - <supply-name>-supply: phandle to the regulator device tree node.
+ - qcom,<supply-name>-voltage - specifies voltage levels for supply. Should be
+ specified in pairs (min, max), units mV.
+ - qcom,<supply-name>-current - specifies max current in mA that can drawn
+ from the <supply-name>.
+
+ above three properties with "supply-name" set to "qcom,cdc-vdd-buck", "qcom,cdc-vdd-tx-h",
+ "qcom,cdc-vdd-rx-h", "qcom,cdc-vddpx-1", "qcom,cdc-vdd-a-1p2v", "qcom,cdc-vddcx-1",
+ "qcom,cdc-vddcx-2" should be present.
+
+ - qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be 1.95 V and 3.00 V).
+
+ - qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
+ - qcom,cdc-micbias-cfilt2-mv - cfilt2 output voltage in milli volts.
+ - qcom,cdc-micbias-cfilt3-mv - cfilt3 output voltage in milli volts.
+ cfilt voltage can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
+
+ - qcom,cdc-micbias1-cfilt-sel = cfilt to use for micbias1 (should be from 1 to 3).
+ - qcom,cdc-micbias2-cfilt-sel = cfilt to use for micbias2 (should be from 1 to 3).
+ - qcom,cdc-micbias3-cfilt-sel = cfilt to use for micbias3 (should be from 1 to 3).
+ - qcom,cdc-micbias4-cfilt-sel = cfilt to use for micbias4 (should be from 1 to 3).
+ This value represents the connected CFILT to MIC Bias.
+
+ - qcom,cdc-micbias1-ext-cap: Boolean. Enable micbias 1 external capacitor mode.
+ - qcom,cdc-micbias2-ext-cap: Boolean. Enable micbias 2 external capacitor mode.
+ - qcom,cdc-micbias3-ext-cap: Boolean. Enable micbias 3 external capacitor mode.
+ - qcom,cdc-micbias4-ext-cap: Boolean. Enable micbias 4 external capacitor mode.
+
+Example:
+i2c@f9925000 {
+ cell-index = <3>;
+ compatible = "qcom,i2c-qup";
+ reg = <0xf9925000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ interrupts = <0 97 0>;
+ interrupt-names = "qup_err_intr";
+ qcom,i2c-bus-freq = <100000>;
+ qcom,i2c-src-freq = <24000000>;
+
+ wcd9xxx_codec@0d{
+ compatible = "qcom,wcd9xxx-i2c";
+ reg = <0x0d>;
+ qcom,cdc-reset-gpio = <&msmgpio 22 0>;
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28>;
+
+ cdc-vdd-buck-supply = <&pm8019_l11>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <25000>;
+
+ cdc-vdd-tx-h-supply = <&pm8019_l11>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ cdc-vdd-rx-h-supply = <&pm8019_l11>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ cdc-vddpx-1-supply = <&pm8019_l11>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ cdc-vdd-a-1p2v-supply = <&pm8019_l9>;
+ qcom,cdc-vdd-a-1p2v-voltage = <1200000 1200000>;
+ qcom,cdc-vdd-a-1p2v-current = <10000>;
+
+ cdc-vddcx-1-supply = <&pm8019_l9>;
+ qcom,cdc-vddcx-1-voltage = <1200000 1200000>;
+ qcom,cdc-vddcx-1-current = <10000>;
+
+ cdc-vddcx-2-supply = <&pm8019_l9>;
+ qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
+ qcom,cdc-vddcx-2-current = <10000>;
+
+ qcom,cdc-micbias-ldoh-v = <0x3>;
+ qcom,cdc-micbias-cfilt1-mv = <1800>;
+ qcom,cdc-micbias-cfilt2-mv = <2700>;
+ qcom,cdc-micbias-cfilt3-mv = <1800>;
+ qcom,cdc-micbias1-cfilt-sel = <0x0>;
+ qcom,cdc-micbias2-cfilt-sel = <0x1>;
+ qcom,cdc-micbias3-cfilt-sel = <0x2>;
+ qcom,cdc-micbias4-cfilt-sel = <0x2>;
+ };
+
+ wcd9xxx_codec@77{
+ compatible = "qcom,wcd9xxx-i2c";
+ reg = <0x77>;
+ };
+
+ wcd9xxx_codec@66{
+ compatible = "qcom,wcd9xxx-i2c";
+ reg = <0x66>;
+ };
+ wcd9xxx_codec@55{
+ compatible = "qcom,wcd9xxx-i2c";
+ reg = <0x55>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index e784bfa..ae7d736 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,15 +34,20 @@
- reg : offset and length of the register set for the device.
- interrupts : should contain the uart interrupt.
-Optional properties:
-- cell-index: An integer specifying the line number of the UART device that
- represents this HSL hardware instance.
+Aliases:
+An alias may optionally be used to bind the serial device to a tty device
+(ttyHSLx) with a given line number. Aliases are of the form serial<n> where <n>
+is an integer representing the line number to use. On systems with multiple
+serial devices present it is recommended that an alias be defined for each such
+device.
Example:
+ aliases {
+ serial0 = &uart0; // This device will be called ttyHSL0
+ };
- serial@19c400000 {
+ uart0: serial@19c400000 {
compatible = "qcom,msm-lsuart-v14"
reg = <0x19c40000 0x1000">;
interrupts = <195>;
- cell-index = <0>; // this device will be named ttyHSL0
};
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
new file mode 100644
index 0000000..0e59f69
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -0,0 +1,20 @@
+MSM HSIC EHCI controller
+
+Required properties :
+- compatible : should be "qcom,hsic-host"
+- regs : offset and length of the register set in the memory map
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+ HSIC EHCI expects "core_irq" and optionally "async_irq".
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is "HSIC_VDDCX" and optionally - "HSIC_GDSC".
+
+Example MSM HSIC EHCI controller device node :
+ hsic@f9a15000 {
+ compatible = "qcom,hsic-host";
+ reg = <0xf9a15000 0x400>;
+ interrupts = <0 136 0>;
+ interrupt-names = "core_irq";
+ HSIC_VDDCX-supply = <&pm8019_l12>;
+ HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 7bff0f2..015822f 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -100,13 +100,24 @@
- compatible: should be "qcom,usb-bam-msm"
- reg : pairs of physical base addresses and region sizes
of all the memory mapped BAM devices present
-- reg-names : Register region name(s) referenced in reg above
- SSUSB BAM expects "ssusb" and "hsusb" for HSSUB BAM.
- Specify "qscratch_ram1_reg" to provide QSCRATCH's RAM1
- register to control USB3 private memory for uses as BAM FIFOs.
+- reg-names : Register region name(s), in 1-1 correspondence with the
+ registers in 'reg'. This list should contain at least as many names
+ as the number of unique values given in both 'usb-active-bam' and
+ all the subnodes' 'usb-bam-type' properties.
+
+ If SSUSB_BAM is used, "ssusb" should be present.
+ If HSUSB_BAM is used, "hsusb" should be present.
+ If HSIC_BAM is used, "hsic" should be present.
+
+ If a QSCRATCH RAM1 register is designated for providing USB3
+ private memory to use as a BAM FIFO, specify "qscratch_ram1_reg".
- interrupts: IRQ lines for BAM devices
-- interrupt-names: BAM interrupt name(s) referenced in interrupts above
- SSUSB BAM expects "ssusb" and "hsusb" for HSSUB BAM
+- interrupt-names: BAM interrupt name(s), in 1-1 correspondence with
+ 'interrupts' above.
+
+ If SSUSB_BAM is used, "ssusb" should be present.
+ If HSUSB_BAM is used, "hsusb" should be present.
+ If HSIC_BAM is used, "hsic" should be present.
- qcom,usb-active-bam: active BAM type. Can be one of
0 - SSUSB_BAM
1 - HSUSB_BAM
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 57d776f..d686523 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -10,12 +10,20 @@
"irq" : Interrupt for DWC3 core
"otg_irq" : Interrupt for DWC3 core's OTG Events
- <supply-name>-supply: phandle to the regulator device tree node
- Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
- "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3" and "vbus_dwc3".
+ Required "supply-name" examples are:
+ "SSUSB_lp8" : 1.8v supply for SSPHY
+ "HSUSB_1p8" : 1.8v supply for HSPHY
+ "HSUSB_3p3" : 3.3v supply for HSPHY
+ "vbus_dwc3" : vbus supply for host mode
+ "ssusb_vdd_dig" : vdd supply for SSPHY digital circuit operation
+ "hsusb_vdd_dig" : vdd supply for HSPHY digital circuit operation
- qcom,dwc-usb3-msm-dbm-eps: Number of endpoints avaliable for
the DBM (Device Bus Manager). The DBM is HW unit which is part of
the MSM USB3.0 core (which also includes the Synopsys DesignWare
USB3.0 controller)
+- qcom,vdd-voltage-level: This property must be a list of three integer
+ values (no, min, max) where each value represents either a voltage in
+ microvolts or a value corresponding to voltage corner
Optional properties :
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
@@ -40,13 +48,14 @@
<0xFD4AB000 0x4>;
interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
interrupt-names = "irq", "otg_irq", "hs_phy_irq";
- SSUSB_VDDCX-supply = <&pm8841_s2>;
+ ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
SSUSB_1p8-supply = <&pm8941_l6>;
- HSUSB_VDDCX-supply = <&pm8841_s2>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
HSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_3p3-supply = <&pm8941_l24>;
vbus_dwc3-supply = <&pm8941_mvs1>;
qcom,dwc-usb3-msm-dbm-eps = <4>
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,msm_bus,name = "usb3";
qcom,msm_bus,num_cases = <2>;
diff --git a/Documentation/dvb/qcom-mpq.txt b/Documentation/dvb/qcom-mpq.txt
index 28f5d39..1196da0 100644
--- a/Documentation/dvb/qcom-mpq.txt
+++ b/Documentation/dvb/qcom-mpq.txt
@@ -123,17 +123,15 @@
Background Processing
---------------------
-When demux receives notifications from underlying HW drivers about new
-data, it schedules work to a single-threaded workqueue to process the
-notification.
+Demux allocates a kernel thread for each live-input to process
+the TS packets notified from the HW for specific input. There
+are two such inputs (TSIF0 and TSIF1), both can be processed in
+parallel by two separate threads.
The processing is the action of demuxing of the new data; it may sleep
as it locks against the demux data-structure that may be accessed by
user-space in the meanwhile.
-A single threaded workqueue exists for each live input (TSIF0 or TSIF1)
-to process the inputs in parallel.
-
Dependencies
------------
The demux driver depends on the following kernel drivers and subsystems:
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 7961b78..502d34a 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -272,5 +272,33 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8644.dtsi"
/include/ "mpq8092-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
index f83fe76..f0570ba 100644
--- a/arch/arm/boot/dts/msm-gdsc.dtsi
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -18,41 +18,48 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_venus";
reg = <0xfd8c1024 0x4>;
+ status = "disabled";
};
gdsc_mdss: qcom,gdsc@fd8c2304 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_mdss";
reg = <0xfd8c2304 0x4>;
+ status = "disabled";
};
gdsc_jpeg: qcom,gdsc@fd8c35a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_jpeg";
reg = <0xfd8c35a4 0x4>;
+ status = "disabled";
};
gdsc_vfe: qcom,gdsc@fd8c36a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_vfe";
reg = <0xfd8c36a4 0x4>;
+ status = "disabled";
};
gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_gx";
reg = <0xfd8c4024 0x4>;
+ status = "disabled";
};
gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_cx";
reg = <0xfd8c4034 0x4>;
+ status = "disabled";
};
gdsc_usb_hsic: qcom,gdsc@fc400404 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_usb_hsic";
reg = <0xfc400404 0x4>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index bf1c971..6538db5 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -58,10 +58,10 @@
};
};
- bms@4000 {
+ pm8941_bms: bms@4000 {
#address-cells = <1>;
#size-cells = <1>;
-
+ status = "disabled";
compatible = "qcom,qpnp-bms";
reg = <0x4000 0x100>;
@@ -487,6 +487,7 @@
compatible = "qcom,qpnp-vadc";
reg = <0x3100 0x100>;
interrupts = <0x0 0x31 0x0>;
+ interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1800>;
@@ -698,12 +699,24 @@
qcom,hw-settle-time = <0>;
qcom,fast-avg-setup = <0>;
};
+
+ chan@185 {
+ label = "usb_id";
+ qcom,channel-num = <185>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
};
iadc@3600 {
compatible = "qcom,qpnp-iadc";
reg = <0x3600 0x100>;
interrupts = <0x0 0x36 0x0>;
+ interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <16>;
qcom,adc-vdd-reference = <1800>;
qcom,rsense = <1500>;
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 09b57a4..b900c3f 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -12,6 +12,7 @@
/include/ "skeleton.dtsi"
/include/ "msm8226-ion.dtsi"
+/include/ "msm-gdsc.dtsi"
/ {
model = "Qualcomm MSM 8226";
@@ -84,4 +85,28 @@
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm8226-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8910-ion.dtsi b/arch/arm/boot/dts/msm8910-ion.dtsi
new file mode 100644
index 0000000..88bb1ab
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-ion.dtsi
@@ -0,0 +1,62 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@30 { /* SYSTEM HEAP */
+ reg = <30>;
+ };
+
+ qcom,ion-heap@8 { /* CP_MM HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <8>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x3800000>;
+ };
+
+ qcom,ion-heap@25 { /* IOMMU HEAP */
+ reg = <25>;
+ };
+
+ qcom,ion-heap@27 { /* QSECOM HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <27>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x780000>;
+ };
+
+ qcom,ion-heap@28 { /* AUDIO HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <28>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x314000>;
+ };
+
+ qcom,ion-heap@29 { /* FIRMWARE HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <29>;
+ qcom,heap-align = <0x20000>;
+ qcom,heap-adjacent = <8>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0xA00000>;
+ };
+
+ };
+};
+
diff --git a/arch/arm/boot/dts/msm8910-rumi.dts b/arch/arm/boot/dts/msm8910-rumi.dts
new file mode 100644
index 0000000..0d944aa
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-rumi.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8910.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8910 Rumi";
+ compatible = "qcom,msm8910-rumi", "qcom,msm8910";
+ qcom,msm-id = <147 1 0>;
+
+ serial@f991f000 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8910.dtsi b/arch/arm/boot/dts/msm8910.dtsi
index 2a2e764..9514e5a 100644
--- a/arch/arm/boot/dts/msm8910.dtsi
+++ b/arch/arm/boot/dts/msm8910.dtsi
@@ -11,6 +11,7 @@
*/
/include/ "skeleton.dtsi"
+/include/ "msm8910-ion.dtsi"
/ {
model = "Qualcomm MSM 8910";
@@ -126,6 +127,73 @@
qcom,current-limit = <800>;
};
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ qcom,pil-string = "adsp";
+ interrupts = <0 156 1>;
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-wcnss {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <6>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x20000>;
+ qcom,pil-string = "wcnss";
+ interrupts = <0 142 1>;
+ };
+
+ qcom,smsm-wcnss {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <6>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x80000>;
+ interrupts = <0 144 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
};
/include/ "msm8910-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index e1b2863..7557fd1 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -192,6 +193,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc2 {
#address-cells = <0>;
interrupt-parent = <&sdcc2>;
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 15fb799..cf45ceb 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -109,6 +110,22 @@
};
};
+ i2c@f9967000 {
+ sii8334@72 {
+ compatible = "qcom,mhl-sii8334";
+ reg = <0x72>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <82 0x8>;
+ mhl-intr-gpio = <&msmgpio 82 0>;
+ mhl-pwr-gpio = <&msmgpio 12 0>;
+ mhl-rst-gpio = <&pm8941_mpps 8 0>;
+ avcc_18-supply = <&pm8941_l24>;
+ avcc_12-supply = <&pm8941_l2>;
+ smps3a-supply = <&pm8941_s3>;
+ vdda-supply = <&pm8941_l12>;
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -192,6 +209,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc1 {
qcom,bus-width = <4>;
};
@@ -256,6 +333,13 @@
};
gpio@cb00 { /* GPIO 12 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-select = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@cc00 { /* GPIO 13 */
@@ -384,6 +468,12 @@
};
mpp@a700 { /* MPP 8 */
+ qcom,mode = <1>; /* DIG_OUT */
+ qcom,output-type = <0>; /* CMOS */
+ qcom,pull-up = <0>;
+ qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
+ qcom,src-select = <0>; /* CONSTANT */
+ qcom,master-en = <1>; /* ENABLE MPP */
};
};
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
index 403a5cc..6623568 100644
--- a/arch/arm/boot/dts/msm8974-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -13,8 +13,9 @@
qcom,kgsl-3d0@fdb00000 {
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
- reg = <0xfdb00000 0x20000>;
- reg-names = "kgsl_3d0_reg_memory";
+ reg = <0xfdb00000 0x10000
+ 0xfdb20000 0x10000>;
+ reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_shader_memory";
interrupts = <0 33 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
diff --git a/arch/arm/boot/dts/msm8974-leds.dtsi b/arch/arm/boot/dts/msm8974-leds.dtsi
new file mode 100644
index 0000000..89bb687
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-leds.dtsi
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d000 {
+ status = "okay";
+ qcom,rgb_0 {
+ label = "rgb";
+ linux,name = "led:rgb_red";
+ qcom,mode = <0>;
+ qcom,pwm-channel = <6>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <3>;
+ linux,default-trigger =
+ "battery-charging";
+ };
+
+ qcom,rgb_1 {
+ label = "rgb";
+ linux,name = "led:rgb_green";
+ qcom,mode = <0>;
+ qcom,pwm-channel = <5>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <4>;
+ linux,default-trigger = "battery-full";
+ };
+ };
+
+ qcom,leds@d100 {
+ status = "disabled";
+ };
+
+ qcom,leds@d200 {
+ status = "disabled";
+ };
+
+ qcom,leds@d300 {
+ status = "okay";
+ qcom,flash_0 {
+ qcom,max-current = <1000>;
+ qcom,default-state = "off";
+ qcom,headroom = <0>;
+ qcom,duration = <1280>;
+ qcom,clamp-curr = <200>;
+ qcom,startup-dly = <1>;
+ qcom,safety-timer;
+ label = "flash";
+ linux,default-trigger =
+ "flash0_trigger";
+ qcom,id = <1>;
+ linux,name = "led:flash_0";
+ qcom,current = <625>;
+ };
+
+ qcom,flash_1 {
+ qcom,max-current = <1000>;
+ qcom,default-state = "off";
+ qcom,headroom = <0>;
+ qcom,duration = <1280>;
+ qcom,clamp-curr = <200>;
+ qcom,startup-dly = <1>;
+ qcom,safety-timer;
+ linux,default-trigger =
+ "flash1_trigger";
+ label = "flash";
+ qcom,id = <2>;
+ linux,name = "led:flash_1";
+ qcom,current = <625>;
+ };
+ };
+
+ qcom,leds@d400 {
+ status = "disabled";
+ };
+
+ qcom,leds@d500 {
+ status = "disabled";
+ };
+
+ qcom,leds@d600 {
+ status = "disabled";
+ };
+
+ qcom,leds@d700 {
+ status = "disabled";
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index f391621..0f65dc8 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -11,6 +11,7 @@
*/
/include/ "msm8974-camera-sensor-liquid.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -76,6 +77,9 @@
qcom,hdmi_tx@fd922100 {
status = "ok";
+
+ qcom,hdmi-tx-mux-sel = <&pm8841_mpps 3 0>;
+ qcom,hdmi-tx-mux-en = <&pm8841_mpps 4 0>;
};
drv2667_vreg: drv2667_vdd_vreg {
@@ -207,6 +211,12 @@
startup-delay-us = <12000>;
enable-active-high;
};
+
+ sound {
+ qcom,model = "msm8974-taiko-liquid-snd-card";
+ qcom,ext-spk-amp-supply = <&ext_5v>;
+ qcom,ext-spk-amp-gpio = <&pm8841_mpps 1 0>;
+ };
};
&usb3 {
@@ -370,9 +380,25 @@
};
gpio@e000 { /* GPIO 33 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,invert = <1>;
+ qcom,master-en = <1>;
};
gpio@e100 { /* GPIO 34 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,invert = <0>;
+ qcom,master-en = <1>;
};
gpio@e200 { /* GPIO 35 */
@@ -437,6 +463,12 @@
&pm8841_mpps {
mpp@a000 { /* MPP 1 */
+ /* CLASS_D_EN speakers PA */
+ qcom,mode = <1>; /* DIG_OUT */
+ qcom,output-type = <0>; /* PNP_PIN_OUT_BUF_CMOS */
+ qcom,vin-sel = <2>; /* S3A 1.8v */
+ qcom,src-select = <0>; /* CONSTANT */
+ qcom,master-en = <1>; /* ENABLE MPP */
};
mpp@a100 { /* MPP 2 */
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index a51a38d..b765611 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -48,11 +48,10 @@
qcom,hdmi-tx-max-voltage-level = <0 1800000 1800000>;
qcom,hdmi-tx-op-mode = <0 1800000 0>;
- gpios = <&msmgpio 31 0>,
- <&msmgpio 32 0>,
- <&msmgpio 33 0>,
- <&msmgpio 34 0>;
- qcom,hdmi-tx-gpio-names = "cec-pin", "hpd-ddc-clk", "hpd-ddc-data", "hpd-pin";
+ qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+ qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+ qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+ qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
};
qcom,mdss_wb_panel {
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index e2c80c2..8563996 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -192,6 +193,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc2 {
#address-cells = <0>;
interrupt-parent = <&sdcc2>;
@@ -213,6 +274,10 @@
qcom,otg-capability;
};
+&pm8941_bms {
+ status = "ok";
+};
+
&pm8941_chg {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 495d3fb..3f7e9de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -71,7 +71,6 @@
rpm-regulator-smpb2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8841_s2: regulator-s2 {
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1050000>;
@@ -131,7 +130,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_s2: regulator-s2 {
regulator-min-microvolt = <2150000>;
regulator-max-microvolt = <2150000>;
@@ -284,7 +282,6 @@
rpm-regulator-ldoa12 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_l12: regulator-l12 {
parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 93ba2bf..0fd5c97 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -156,6 +156,19 @@
qcom,bus-width = <8>;
qcom,nonremovable;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ qcom,msm-bus,name = "sdcc1";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
+ <78 512 6656 13312>, /* 13 MB/s*/
+ <78 512 13312 26624>, /* 26 MB/s */
+ <78 512 26624 53248>, /* 52 MB/s */
+ <78 512 53248 106496>, /* 104 MB/s */
+ <78 512 106496 212992>, /* 208 MB/s */
+ <78 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <42>;
};
@@ -190,6 +203,19 @@
qcom,xpc;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
qcom,current-limit = <800>;
+
+ qcom,msm-bus,name = "sdcc2";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 6656 13312>, /* 13 MB/s*/
+ <81 512 13312 26624>, /* 26 MB/s */
+ <81 512 26624 53248>, /* 52 MB/s */
+ <81 512 53248 106496>, /* 104 MB/s */
+ <81 512 106496 212992>, /* 208 MB/s */
+ <81 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <44>;
};
@@ -222,6 +248,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc3";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
+ <79 512 6656 13312>, /* 13 MB/s*/
+ <79 512 13312 26624>, /* 26 MB/s */
+ <79 512 26624 53248>, /* 52 MB/s */
+ <79 512 53248 106496>, /* 104 MB/s */
+ <79 512 106496 212992>, /* 208 MB/s */
+ <79 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
@@ -254,6 +293,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc4";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
+ <80 512 6656 13312>, /* 13 MB/s*/
+ <80 512 13312 26624>, /* 26 MB/s */
+ <80 512 26624 53248>, /* 52 MB/s */
+ <80 512 53248 106496>, /* 104 MB/s */
+ <80 512 106496 212992>, /* 208 MB/s */
+ <80 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
@@ -540,116 +592,6 @@
<0x1e50008a>, /* LPG_CHAN10 */
<0x1e60008b>, /* LPG_CHAN11 */
<0x1e70008c>; /* LPG_CHAN12 */
-
- qcom,pm8941@1 {
- qcom,leds@d300 {
- status = "okay";
- qcom,flash_0 {
- qcom,max-current = <1000>;
- qcom,default-state = "off";
- qcom,headroom = <0>;
- qcom,duration = <1280>;
- qcom,clamp-curr = <200>;
- qcom,startup-dly = <1>;
- qcom,safety-timer;
- label = "flash";
- linux,default-trigger =
- "flash0_trigger";
- qcom,id = <1>;
- linux,name = "led:flash_0";
- qcom,current = <625>;
- };
-
- qcom,flash_1 {
- qcom,max-current = <1000>;
- qcom,default-state = "off";
- qcom,headroom = <0>;
- qcom,duration = <1280>;
- qcom,clamp-curr = <200>;
- qcom,startup-dly = <1>;
- qcom,safety-timer;
- linux,default-trigger =
- "flash1_trigger";
- label = "flash";
- qcom,id = <2>;
- linux,name = "led:flash_1";
- qcom,current = <625>;
- };
- };
-
- qcom,leds@d400 {
- status = "disabled";
- };
-
- qcom,leds@d500 {
- status = "disabled";
- };
-
- qcom,leds@d600 {
- status = "disabled";
- };
-
- qcom,leds@d700 {
- status = "disabled";
- };
-
- qcom,leds@d800 {
- status = "okay";
- qcom,wled_0 {
- label = "wled";
- linux,name = "wled:backlight";
- linux,default-trigger = "bkl-trigger";
- qcom,cs-out-en;
- qcom,op-fdbck;
- qcom,default-state = "off";
- qcom,max-current = <25>;
- qcom,ctrl-delay-us = <0>;
- qcom,boost-curr-lim = <3>;
- qcom,cp-sel = <0>;
- qcom,switch-freq = <2>;
- qcom,ovp-val = <2>;
- qcom,num-strings = <1>;
- qcom,id = <0>;
- };
- };
-
- qcom,leds@d900 {
- status = "disabled";
- };
-
- qcom,leds@da00 {
- status = "disabled";
- };
-
- qcom,leds@db00 {
- status = "disabled";
- };
-
- qcom,leds@dc00 {
- status = "disabled";
- };
-
- qcom,leds@dd00 {
- status = "disabled";
- };
-
- qcom,leds@de00 {
- status = "disabled";
- };
-
- qcom,leds@df00 {
- status = "disabled";
- };
-
- qcom,leds@e000 {
- status = "disabled";
- };
-
- qcom,leds@e100 {
- status = "disabled";
- };
-
- };
};
i2c@f9967000 { /* BLSP#11 */
@@ -724,13 +666,14 @@
<0xfd4ab000 0x4>;
interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
interrupt-names = "irq", "otg_irq", "hs_phy_irq";
- SSUSB_VDDCX-supply = <&pm8841_s2>;
+ ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
SSUSB_1p8-supply = <&pm8941_l6>;
- HSUSB_VDDCX-supply = <&pm8841_s2>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
HSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_3p3-supply = <&pm8941_l24>;
vbus_dwc3-supply = <&pm8941_mvs1>;
qcom,dwc-usb3-msm-dbm-eps = <4>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,msm-bus,name = "usb3";
qcom,msm-bus,num-cases = <2>;
@@ -757,6 +700,7 @@
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <0>;
};
qcom,msm-pcm {
@@ -905,6 +849,16 @@
};
};
+ qcom,msm-dai-mi2s {
+ compatible = "qcom,msm-dai-mi2s";
+ qcom,msm-dai-q6-mi2s-quat {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <3>;
+ qcom,msm-mi2s-rx-lines = <1>;
+ qcom,msm-mi2s-tx-lines = <2>;
+ };
+ };
+
qcom,msm-pcm-hostless {
compatible = "qcom,msm-pcm-hostless";
};
@@ -933,7 +887,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
@@ -1029,6 +983,8 @@
qcom,qseecom@fe806000 {
compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,active-only = <0>;
@@ -1212,7 +1168,7 @@
};
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
- qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
+ qcom,memblock-remove = <0x7f00000 0x8000000>; /* Address and Size of Hole */
};
qcom,smem@fa00000 {
@@ -1284,6 +1240,34 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8x41-rpm-regulator.dtsi"
/include/ "msm-pm8841.dtsi"
/include/ "msm-pm8941.dtsi"
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index d62f7e7..2839864 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -115,6 +115,21 @@
qcom,lpm-level@3 {
reg = <0x3>;
+ qcom,mode = <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
+ qcom,xo = <1>; /* ON */
+ qcom,l2 = <0>; /* GDHS */
+ qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
+ qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
+ qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
+ qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
+ qcom,latency-us = <4500>;
+ qcom,ss-power = <5000>;
+ qcom,energy-overhead = <60350000>;
+ qcom,time-overhead = <7300>;
+ };
+
+ qcom,lpm-level@4 {
+ reg = <0x4>;
qcom,mode= <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
qcom,xo = <0>; /* OFF */
qcom,l2 = <0>; /* OFF */
@@ -128,8 +143,8 @@
qcom,time-overhead = <13300>;
};
- qcom,lpm-level@4 {
- reg = <0x4>;
+ qcom,lpm-level@5 {
+ reg = <0x5>;
qcom,mode= <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
qcom,xo = <0>; /* OFF */
qcom,l2 = <0>; /* OFF */
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index b128648..24f616d 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -23,7 +23,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8019_s2: regulator-s2 {
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <1250000>;
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index b79f370..d1c731e 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -88,6 +88,47 @@
reg = <0xfc42b0c8 0xc8>;
};
+ hsic@f9a15000 {
+ compatible = "qcom,hsic-host";
+ reg = <0xf9a15000 0x400>;
+ interrupts = <0 136 0>;
+ interrupt-names = "core_irq";
+ HSIC_VDDCX-supply = <&pm8019_l12>;
+ HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+ };
+
+ qcom,usbbam@f9a44000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xf9a44000 0x11000>;
+ reg-names = "hsusb";
+ interrupts = <0 135 0>;
+ interrupt-names = "hsusb";
+ qcom,usb-active-bam = <1>;
+ qcom,usb-total-bam-num = <3>;
+ qcom,usb-bam-num-pipes = <16>;
+ qcom,ignore-core-reset-ack;
+
+ qcom,pipe0 {
+ label = "usb-to-ipa";
+ qcom,usb-bam-type = <1>;
+ qcom,usb-bam-mem-type = <2>;
+ qcom,src-bam-physical-address = <0xf9a44000>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,data-fifo-size = <0x600>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe1 {
+ label = "ipa-to-usb";
+ qcom,usb-bam-type = <1>;
+ qcom,usb-bam-mem-type = <2>;
+ qcom,dst-bam-physical-address = <0xf9a44000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-size = <0x600>;
+ qcom,descriptor-fifo-size = <0x100>;
+ };
+ };
+
qcom,nand@f9ac0000 {
compatible = "qcom,msm-nand";
reg = <0xf9ac0000 0x1000>,
@@ -245,6 +286,42 @@
interrupts = <0 29 1>;
};
+ qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+ reg = <0xfd4c0000 0x26000>,
+ <0xfd4c4000 0x14818>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 252 0>,
+ <0 253 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+ };
+
qcom,acpuclk@f9010000 {
compatible = "qcom,acpuclk-9625";
reg = <0xf9010008 0x10>,
@@ -393,6 +470,7 @@
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <2>;
};
qcom,msm-pcm {
@@ -440,6 +518,11 @@
qcom,msm-dai-q6 {
compatible = "qcom,msm-dai-q6";
};
+
+ qcom,mss {
+ compatible = "qcom,pil-q6v5-mss";
+ interrupts = <0 24 1>;
+ };
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index e2e05b2..2dd4b30 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -46,7 +46,7 @@
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-# CONFIG_SMP_ON_UP is not set
+CONFIG_SMP=y
CONFIG_ARM_ARCH_TIMER=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index a613932..d5e15f1 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -414,7 +414,6 @@
CONFIG_USB_EHCI_MSM_HOST4=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 0d63836..386f311 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -417,7 +417,6 @@
CONFIG_USB_EHCI_MSM_HOST4=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 0070e22..e4b60ff 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -37,7 +37,6 @@
CONFIG_EFI_PARTITION=y
CONFIG_ARCH_MSM=y
CONFIG_ARCH_MSM8974=y
-CONFIG_ARCH_MSM8226=y
CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
# CONFIG_MSM_STACKED_MEMORY is not set
CONFIG_CPU_HAS_L2_PMU=y
@@ -75,6 +74,7 @@
CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
@@ -217,6 +217,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
CONFIG_QSEECOM=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
@@ -270,6 +271,7 @@
CONFIG_GPIO_QPNP_PIN=y
CONFIG_GPIO_QPNP_PIN_DEBUG=y
CONFIG_POWER_SUPPLY=y
+CONFIG_SMB350_CHARGER=y
CONFIG_BATTERY_BQ28400=y
CONFIG_QPNP_CHARGER=y
CONFIG_BATTERY_BCL=y
@@ -353,6 +355,8 @@
CONFIG_MMC_BLOCK_MINORS=32
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_MSM=y
CONFIG_MMC_MSM_SPS_SUPPORT=y
CONFIG_LEDS_QPNP=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 33400ea..0042406 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -219,6 +219,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
CONFIG_QSEECOM=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
@@ -356,6 +357,8 @@
CONFIG_MMC_BLOCK_MINORS=32
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_MSM=y
CONFIG_MMC_MSM_SPS_SUPPORT=y
CONFIG_LEDS_QPNP=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index b9add04..d47870e 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -21,6 +21,7 @@
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
@@ -42,6 +43,9 @@
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_MSM_DLOAD_MODE=y
@@ -125,6 +129,7 @@
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
+CONFIG_MSM_BUS_SCALING=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
@@ -164,6 +169,7 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_IPA=y
CONFIG_SPS=y
CONFIG_USB_BAM=y
CONFIG_SPS_SUPPORT_BAMDMA=y
@@ -251,3 +257,9 @@
CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_MSM_ADSP_LOADER=m
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index a0868c7..42e250d 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -67,6 +67,7 @@
select MSM_PM2 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_MODEM_RESTART
+ select ARM_HAS_SG_CHAIN
config ARCH_QSD8X50
bool "QSD8X50"
@@ -177,6 +178,7 @@
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
select MSM_ULTRASOUND_A
+ select MSM_IOMMU_GPU_SYNC
select GENERIC_TIME_VSYSCALL
select USE_USER_ACCESSIBLE_TIMERS
select ARM_USE_USER_ACCESSIBLE_TIMERS
@@ -213,6 +215,7 @@
select HOLES_IN_ZONE if SPARSEMEM
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
+ select MSM_IOMMU_GPU_SYNC
select GENERIC_TIME_VSYSCALL
select USE_USER_ACCESSIBLE_TIMERS
select ARM_USE_USER_ACCESSIBLE_TIMERS
@@ -245,6 +248,7 @@
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
select MSM_ULTRASOUND_A
+ select MSM_IOMMU_GPU_SYNC
select GENERIC_TIME_VSYSCALL
select USE_USER_ACCESSIBLE_TIMERS
select ARM_USE_USER_ACCESSIBLE_TIMERS
@@ -279,6 +283,7 @@
select MEMORY_HOLE_CARVEOUT
select MSM_RPM_STATS_LOG
select QMI_ENCDEC
+ select DONT_MAP_HOLE_AFTER_MEMBANK0
config ARCH_MPQ8092
bool "MPQ8092"
@@ -369,7 +374,6 @@
bool "MSM8910"
select ARM_GIC
select GIC_SECURE
- select SMP
select ARCH_MSM_CORTEXMP
select CPU_V7
select MSM_SCM if SMP
@@ -385,7 +389,6 @@
bool "MSM8226"
select ARM_GIC
select GIC_SECURE
- select SMP
select ARCH_MSM_CORTEXMP
select CPU_V7
select MSM_SCM if SMP
@@ -394,6 +397,8 @@
select MULTI_IRQ_HANDLER
select GPIO_MSM_V3
select MSM_GPIOMUX
+ select MSM_NATIVE_RESTART
+ select MSM_RESTART_V2
endmenu
choice
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 8690df8..2c7424e 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -46,15 +46,15 @@
endif
obj-$(CONFIG_SMP) += headsmp.o
+ifdef CONFIG_ARCH_MSM_CORTEXMP
ifdef CONFIG_ARCH_MSM8625
obj-$(CONFIG_SMP) += platsmp-8625.o
else
-ifdef CONFIG_ARCH_MSM8910
obj-$(CONFIG_SMP) += platsmp-8910.o
+endif
else
obj-$(CONFIG_SMP) += platsmp.o
endif
-endif
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_MSM_CPU_AVS) += avs.o
@@ -295,10 +295,11 @@
obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o clock-mdss-8974.o
obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
obj-$(CONFIG_ARCH_MSM9625) += gdsc.o
+obj-$(CONFIG_ARCH_MSM8226) += gdsc.o
obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += clock-local2.o clock-pll.o clock-9625.o clock-rpm.o clock-voter.o acpuclock-9625.o
-obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o acpuclock-8930ab.o
obj-$(CONFIG_ARCH_MPQ8092) += board-8092.o board-8092-gpiomux.o
obj-$(CONFIG_ARCH_MSM8226) += board-8226.o board-8226-gpiomux.o
obj-$(CONFIG_ARCH_MSM8910) += board-8910.o board-8910-gpiomux.o
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index 88237af..5c1d3e1 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -213,6 +213,32 @@
{ 0, { 0 } }
};
+static struct acpu_level tbl_faster[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 850000 },
+ { 0, { 432000, HFPLL, 2, 0x20 }, L2(6), 875000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 875000 },
+ { 0, { 540000, HFPLL, 2, 0x28 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 900000 },
+ { 0, { 648000, HFPLL, 1, 0x18 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 925000 },
+ { 0, { 756000, HFPLL, 1, 0x1C }, L2(6), 962500 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 962500 },
+ { 0, { 864000, HFPLL, 1, 0x20 }, L2(6), 975000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 975000 },
+ { 0, { 972000, HFPLL, 1, 0x24 }, L2(6), 1000000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 1000000 },
+ { 0, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1050000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1050000 },
+ { 0, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1075000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1075000 },
+ { 0, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1100000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1100000 },
+ { 0, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1112500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
+ { 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1125000 },
+ { 0, { 0 } }
+};
+
static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
{ 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 975000 },
@@ -369,7 +395,7 @@
[0][PVS_SLOW] = {tbl_slow, sizeof(tbl_slow), 0 },
[0][PVS_NOMINAL] = {tbl_nom, sizeof(tbl_nom), 25000 },
[0][PVS_FAST] = {tbl_fast, sizeof(tbl_fast), 25000 },
- [0][PVS_FASTER] = {tbl_fast, sizeof(tbl_fast), 25000 },
+ [0][PVS_FASTER] = {tbl_faster, sizeof(tbl_faster), 25000 },
[1][0] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
[1][1] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
diff --git a/arch/arm/mach-msm/acpuclock-8930ab.c b/arch/arm/mach-msm/acpuclock-8930ab.c
new file mode 100644
index 0000000..764ae41
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930ab.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 37,
+ .nom_vdd_l_max = 74,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable_pm8917[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "krait0_l23", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "krait1_l23", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "l2_l23", 1800000 },
+ },
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .sec_clk_sel = 2,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(4800), /* At least 600 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8930ab",
+};
+
+/* TODO: Update new L2 freqs once they are available */
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0x20 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0x24 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0x18 }, LVL_NOM, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0x1A }, LVL_NOM, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0x22 }, LVL_HIGH, 1150000, 5 },
+ [11] = { { 972000, HFPLL, 1, 0x24 }, LVL_HIGH, 1150000, 5 },
+ [12] = { { 1026000, HFPLL, 1, 0x26 }, LVL_HIGH, 1150000, 5 },
+ [13] = { { 1080000, HFPLL, 1, 0x28 }, LVL_HIGH, 1150000, 5 },
+ [14] = { { 1134000, HFPLL, 1, 0x2A }, LVL_HIGH, 1150000, 5 },
+ [15] = { { 1188000, HFPLL, 1, 0x2C }, LVL_HIGH, 1150000, 5 },
+ { }
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+ { 1, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+ { 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+ { 1, { 1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+ { 1, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+ { 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+ { 1, { 1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+ { 1, { 1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+ { 1, { 1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+ { 1, { 1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+ { 0, { 0 } }
+};
+
+/* TODO: Update boost voltage once the pvs data is available */
+static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
+[0][PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+[0][PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 0 },
+[0][PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 0 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930ab_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .pte_efuse_phys = 0x007000C0,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8930ab_probe(struct platform_device *pdev)
+{
+ struct acpuclk_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata && pdata->uses_pm8917)
+ acpuclk_8930ab_params.scalable = scalable_pm8917;
+
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8930ab_params);
+}
+
+static struct platform_driver acpuclk_8930ab_driver = {
+ .driver = {
+ .name = "acpuclk-8930ab",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8930ab_init(void)
+{
+ return platform_driver_probe(&acpuclk_8930ab_driver,
+ acpuclk_8930ab_probe);
+}
+device_initcall(acpuclk_8930ab_init);
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 61213cf..0fbd6dc 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -113,14 +113,14 @@
};
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { { 300000, PLL_0, 0, 0 }, LVL_LOW, 950000, 0 },
- [1] = { { 384000, HFPLL, 2, 40 }, LVL_NOM, 950000, 1 },
- [2] = { { 460800, HFPLL, 2, 48 }, LVL_NOM, 950000, 1 },
- [3] = { { 537600, HFPLL, 1, 28 }, LVL_NOM, 950000, 2 },
- [4] = { { 576000, HFPLL, 1, 30 }, LVL_NOM, 950000, 2 },
- [5] = { { 652800, HFPLL, 1, 34 }, LVL_NOM, 950000, 2 },
- [6] = { { 729600, HFPLL, 1, 38 }, LVL_NOM, 950000, 2 },
- [7] = { { 806400, HFPLL, 1, 42 }, LVL_NOM, 950000, 2 },
+ [0] = { { 300000, PLL_0, 0, 0 }, LVL_LOW, 1050000, 0 },
+ [1] = { { 345600, HFPLL, 2, 36 }, LVL_NOM, 1050000, 1 },
+ [2] = { { 422400, HFPLL, 2, 44 }, LVL_NOM, 1050000, 1 },
+ [3] = { { 499200, HFPLL, 2, 52 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 576000, HFPLL, 1, 30 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 652800, HFPLL, 1, 34 }, LVL_NOM, 1050000, 2 },
+ [6] = { { 729600, HFPLL, 1, 38 }, LVL_NOM, 1050000, 2 },
+ [7] = { { 806400, HFPLL, 1, 42 }, LVL_NOM, 1050000, 2 },
[8] = { { 883200, HFPLL, 1, 46 }, LVL_HIGH, 1050000, 2 },
[9] = { { 960000, HFPLL, 1, 50 }, LVL_HIGH, 1050000, 2 },
[10] = { { 1036800, HFPLL, 1, 54 }, LVL_HIGH, 1050000, 3 },
@@ -143,30 +143,30 @@
};
static struct acpu_level acpu_freq_tbl[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 950000, 100000 },
- { 1, { 384000, HFPLL, 2, 40 }, L2(3), 950000, 3200000 },
- { 1, { 460800, HFPLL, 2, 48 }, L2(3), 950000, 3200000 },
- { 1, { 537600, HFPLL, 1, 28 }, L2(5), 950000, 3200000 },
- { 1, { 576000, HFPLL, 1, 30 }, L2(5), 950000, 3200000 },
- { 1, { 652800, HFPLL, 1, 34 }, L2(5), 950000, 3200000 },
- { 1, { 729600, HFPLL, 1, 38 }, L2(5), 950000, 3200000 },
- { 1, { 806400, HFPLL, 1, 42 }, L2(7), 950000, 3200000 },
- { 1, { 883200, HFPLL, 1, 46 }, L2(7), 950000, 3200000 },
- { 1, { 960000, HFPLL, 1, 50 }, L2(7), 950000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 54 }, L2(7), 950000, 3200000 },
- { 1, { 1113600, HFPLL, 1, 58 }, L2(12), 1050000, 3200000 },
- { 1, { 1190400, HFPLL, 1, 62 }, L2(12), 1050000, 3200000 },
- { 1, { 1267200, HFPLL, 1, 66 }, L2(12), 1050000, 3200000 },
- { 1, { 1344000, HFPLL, 1, 70 }, L2(15), 1050000, 3200000 },
- { 1, { 1420800, HFPLL, 1, 74 }, L2(15), 1050000, 3200000 },
- { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 1050000, 3200000 },
- { 0, { 1574400, HFPLL, 1, 82 }, L2(20), 1050000, 3200000 },
- { 0, { 1651200, HFPLL, 1, 86 }, L2(20), 1050000, 3200000 },
- { 0, { 1728000, HFPLL, 1, 90 }, L2(20), 1050000, 3200000 },
- { 0, { 1804800, HFPLL, 1, 94 }, L2(25), 1050000, 3200000 },
- { 0, { 1881600, HFPLL, 1, 98 }, L2(25), 1050000, 3200000 },
- { 0, { 1958400, HFPLL, 1, 102 }, L2(25), 1050000, 3200000 },
- { 0, { 1996800, HFPLL, 1, 104 }, L2(25), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 850000, 100000 },
+ { 0, { 345600, HFPLL, 2, 36 }, L2(0), 850000, 3200000 },
+ { 1, { 422400, HFPLL, 2, 44 }, L2(0), 850000, 3200000 },
+ { 0, { 499200, HFPLL, 2, 52 }, L2(0), 850000, 3200000 },
+ { 1, { 576000, HFPLL, 1, 30 }, L2(0), 850000, 3200000 },
+ { 1, { 652800, HFPLL, 1, 34 }, L2(16), 850000, 3200000 },
+ { 0, { 729600, HFPLL, 1, 38 }, L2(16), 850000, 3200000 },
+ { 1, { 806400, HFPLL, 1, 42 }, L2(16), 850000, 3200000 },
+ { 1, { 883200, HFPLL, 1, 46 }, L2(16), 870000, 3200000 },
+ { 1, { 960000, HFPLL, 1, 50 }, L2(16), 880000, 3200000 },
+ { 1, { 1036800, HFPLL, 1, 54 }, L2(16), 900000, 3200000 },
+ { 1, { 1113600, HFPLL, 1, 58 }, L2(16), 915000, 3200000 },
+ { 1, { 1190400, HFPLL, 1, 62 }, L2(16), 935000, 3200000 },
+ { 1, { 1267200, HFPLL, 1, 66 }, L2(16), 950000, 3200000 },
+ { 1, { 1344000, HFPLL, 1, 70 }, L2(16), 970000, 3200000 },
+ { 1, { 1420800, HFPLL, 1, 74 }, L2(16), 985000, 3200000 },
+ { 1, { 1497600, HFPLL, 1, 78 }, L2(16), 1000000, 3200000 },
+ { 1, { 1574400, HFPLL, 1, 82 }, L2(16), 1015000, 3200000 },
+ { 1, { 1651200, HFPLL, 1, 86 }, L2(16), 1030000, 3200000 },
+ { 1, { 1728000, HFPLL, 1, 90 }, L2(16), 1050000, 3200000 },
+ { 0, { 1804800, HFPLL, 1, 94 }, L2(16), 1050000, 3200000 },
+ { 0, { 1881600, HFPLL, 1, 98 }, L2(16), 1050000, 3200000 },
+ { 0, { 1958400, HFPLL, 1, 102 }, L2(16), 1050000, 3200000 },
+ { 0, { 1996800, HFPLL, 1, 104 }, L2(16), 1050000, 3200000 },
{ 0, { 0 } }
};
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index b9d0527..10c4d6c 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -977,7 +977,11 @@
/* Fall through. */
case CPU_UP_CANCELED:
acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
+
+ regulator_disable(sc->vreg[VREG_CORE].reg);
regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
+ regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
+ sc->vreg[VREG_CORE].max_vdd);
break;
case CPU_UP_PREPARE:
if (!sc->initialized) {
@@ -988,10 +992,20 @@
}
if (WARN_ON(!prev_khz[cpu]))
return NOTIFY_BAD;
+
+ rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ sc->vreg[VREG_CORE].cur_vdd,
+ sc->vreg[VREG_CORE].max_vdd);
+ if (rc < 0)
+ return NOTIFY_BAD;
rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
sc->vreg[VREG_CORE].cur_ua);
if (rc < 0)
return NOTIFY_BAD;
+ rc = regulator_enable(sc->vreg[VREG_CORE].reg);
+ if (rc < 0)
+ return NOTIFY_BAD;
+
acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
break;
default:
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index 5ebb010..38ac83e 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -182,6 +182,12 @@
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
+ .end = 0x0430ffff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = KGSL_3D0_SHADER_MEMORY,
+ .start = 0x04310000, /* Shader Mem Address */
.end = 0x0431ffff,
.flags = IORESOURCE_MEM,
},
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index fe2d2d2..f9d1533 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -433,6 +433,8 @@
.shutdown_soc_valid_limit = 20,
.adjust_soc_low_threshold = 25,
.chg_term_ua = CHG_TERM_MA * 1000,
+ .normal_voltage_calc_ms = 20000,
+ .low_voltage_calc_ms = 1000,
};
static struct pm8921_platform_data
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 851f7d9..a66495d 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -455,7 +455,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index e5263c7..3e90489 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -33,6 +33,7 @@
#include <mach/board.h>
#include <mach/gpiomux.h>
#include <mach/msm_iomap.h>
+#include <mach/restart.h>
#ifdef CONFIG_ION_MSM
#include <mach/ion.h>
#endif
@@ -63,6 +64,12 @@
CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
CLK_DUMMY("iface_clk", HSUSB_IFACE_CLK, "f9a55000.usb", OFF),
CLK_DUMMY("core_clk", HSUSB_CORE_CLK, "f9a55000.usb", OFF),
+ CLK_DUMMY("iface_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("core_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("bus_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("iface_clk", NULL, "msm_sdcc.2", OFF),
+ CLK_DUMMY("core_clk", NULL, "msm_sdcc.2", OFF),
+ CLK_DUMMY("bus_clk", NULL, "msm_sdcc.2", OFF),
};
static struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -70,6 +77,14 @@
.size = ARRAY_SIZE(msm_clocks_dummy),
};
+static struct of_dev_auxdata msm8226_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
+ "msm_sdcc.2", NULL),
+ {}
+};
+
static struct reserve_info msm8226_reserve_info __initdata = {
.memtype_reserve_table = msm8226_reserve_table,
.paddr_to_memtype = msm8226_paddr_to_memtype,
@@ -89,13 +104,16 @@
void __init msm8226_init(void)
{
+ struct of_dev_auxdata *adata = msm8226_auxdata_lookup;
+
msm8226_init_gpiomux();
+
msm_clock_init(&msm_dummy_clock_init_data);
if (socinfo_init() < 0)
pr_err("%s: socinfo_init() failed\n", __func__);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
}
static const char *msm8226_dt_match[] __initconst = {
@@ -111,5 +129,6 @@
.timer = &msm_dt_timer,
.dt_compat = msm8226_dt_match,
.reserve = msm8226_reserve,
- .init_very_early = msm8226_early_memory
+ .init_very_early = msm8226_early_memory,
+ .restart = msm_restart,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-8910.c b/arch/arm/mach-msm/board-8910.c
index b031dac..42fe1ea 100644
--- a/arch/arm/mach-msm/board-8910.c
+++ b/arch/arm/mach-msm/board-8910.c
@@ -35,12 +35,29 @@
#ifdef CONFIG_ION_MSM
#include <mach/ion.h>
#endif
+#include <mach/msm_memtypes.h>
#include <mach/socinfo.h>
#include <mach/board.h>
#include <mach/clk-provider.h>
#include "board-dt.h"
#include "clock.h"
+static struct memtype_reserve msm8910_reserve_table[] __initdata = {
+ [MEMTYPE_SMI] = {
+ },
+ [MEMTYPE_EBI0] = {
+ .flags = MEMTYPE_FLAGS_1M_ALIGN,
+ },
+ [MEMTYPE_EBI1] = {
+ .flags = MEMTYPE_FLAGS_1M_ALIGN,
+ },
+};
+
+static int msm8910_paddr_to_memtype(unsigned int paddr)
+{
+ return MEMTYPE_EBI1;
+}
+
static struct clk_lookup msm_clocks_dummy[] = {
CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
@@ -67,6 +84,22 @@
{}
};
+static struct reserve_info msm8910_reserve_info __initdata = {
+ .memtype_reserve_table = msm8910_reserve_table,
+ .paddr_to_memtype = msm8910_paddr_to_memtype,
+};
+
+static void __init msm8910_early_memory(void)
+{
+ reserve_info = &msm8910_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_reserve, msm8910_reserve_table);
+}
+
+static void __init msm8910_reserve(void)
+{
+ msm_reserve();
+}
+
void __init msm8910_init(void)
{
struct of_dev_auxdata *adata = msm8910_auxdata_lookup;
@@ -93,4 +126,6 @@
.timer = &msm_dt_timer,
.dt_compat = msm8910_dt_match,
.restart = msm_restart,
+ .reserve = msm8910_reserve,
+ .init_very_early = msm8910_early_memory
MACHINE_END
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index 7e477b1..4506ea7 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -132,6 +132,15 @@
};
static bool dsi_power_on;
+static struct mipi_dsi_panel_platform_data novatek_pdata;
+static void pm8917_gpio_set_backlight(int bl_level)
+{
+ int gpio24 = PM8917_GPIO_PM_TO_SYS(24);
+ if (bl_level > 0)
+ gpio_set_value_cansleep(gpio24, 1);
+ else
+ gpio_set_value_cansleep(gpio24, 0);
+}
/*
* TODO: When physical 8930/PM8038 hardware becomes
@@ -214,9 +223,13 @@
rc);
return -ENODEV;
}
+ gpio_set_value_cansleep(gpio24, 0);
+ novatek_pdata.gpio_set_backlight =
+ pm8917_gpio_set_backlight;
}
dsi_power_on = true;
}
+
if (on) {
rc = regulator_set_optimum_mode(reg_l8, 100000);
if (rc < 0) {
@@ -256,8 +269,6 @@
gpio_set_value(DISP_RST_GPIO, 1);
gpio_set_value(DISP_3D_2D_MODE, 1);
usleep(20);
- if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
- gpio_set_value_cansleep(gpio24, 1);
} else {
gpio_set_value(DISP_RST_GPIO, 0);
@@ -294,8 +305,6 @@
}
gpio_set_value(DISP_3D_2D_MODE, 0);
usleep(20);
- if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
- gpio_set_value_cansleep(gpio24, 0);
}
return 0;
}
@@ -441,7 +450,7 @@
#ifdef CONFIG_MSM_BUS_SCALING
.mdp_bus_scale_table = &mdp_bus_scale_pdata,
#endif
- .mdp_rev = MDP_REV_42,
+ .mdp_rev = MDP_REV_43,
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.mem_hid = BIT(ION_CP_MM_HEAP_ID),
#else
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 578c665..3eb7d8a 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -88,6 +88,12 @@
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
+ .end = 0x0430ffff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = KGSL_3D0_SHADER_MEMORY,
+ .start = 0x04310000,
.end = 0x0431ffff,
.flags = IORESOURCE_MEM,
},
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index 618f83b..dd4b67e 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -472,6 +472,8 @@
.adjust_soc_low_threshold = 25,
.chg_term_ua = CHG_TERM_MA * 1000,
.rconn_mohm = 18,
+ .normal_voltage_calc_ms = 20000,
+ .low_voltage_calc_ms = 1000,
};
static struct pm8038_platform_data pm8038_platform_data __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index c74dc26..eaebea0 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -189,12 +189,14 @@
REGULATOR_SUPPLY("krait0", "acpuclk-8627"),
REGULATOR_SUPPLY("krait0", "acpuclk-8930"),
REGULATOR_SUPPLY("krait0", "acpuclk-8930aa"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930ab"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8038_s6", NULL),
REGULATOR_SUPPLY("krait1", "acpuclk-8627"),
REGULATOR_SUPPLY("krait1", "acpuclk-8930"),
REGULATOR_SUPPLY("krait1", "acpuclk-8930aa"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930ab"),
};
VREG_CONSUMERS(LVS1) = {
REGULATOR_SUPPLY("8038_lvs1", NULL),
@@ -447,7 +449,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
@@ -564,6 +567,14 @@
RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930aa"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930aa"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930aa"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8930ab"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8930ab"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8930ab"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930ab"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930ab"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930ab"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930ab"),
};
struct rpm_regulator_platform_data
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index 6f58771..9a2967a 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -206,12 +206,14 @@
REGULATOR_SUPPLY("krait0", "acpuclk-8627"),
REGULATOR_SUPPLY("krait0", "acpuclk-8930"),
REGULATOR_SUPPLY("krait0", "acpuclk-8930aa"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930ab"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8917_s6", NULL),
REGULATOR_SUPPLY("krait1", "acpuclk-8627"),
REGULATOR_SUPPLY("krait1", "acpuclk-8930"),
REGULATOR_SUPPLY("krait1", "acpuclk-8930aa"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930ab"),
};
VREG_CONSUMERS(S7) = {
REGULATOR_SUPPLY("8917_s7", NULL),
@@ -485,7 +487,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
@@ -631,6 +634,18 @@
RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930aa"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930aa"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930aa"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_l23", "acpuclk-8930ab"),
+ RPM_REG_MAP(S8, 0, 1, "krait0_s8", "acpuclk-8930ab"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_l23", "acpuclk-8930ab"),
+ RPM_REG_MAP(S8, 0, 2, "krait1_s8", "acpuclk-8930ab"),
+ RPM_REG_MAP(L23, 0, 6, "l2_l23", "acpuclk-8930ab"),
+ RPM_REG_MAP(S8, 0, 6, "l2_s8", "acpuclk-8930ab"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930ab"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930ab"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930ab"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930ab"),
+
};
struct rpm_regulator_platform_data
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index ece65d6..512ae72 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -2755,6 +2755,9 @@
pdata = msm8930_device_acpuclk.dev.platform_data;
pdata->uses_pm8917 = true;
+
+ pdata = msm8930ab_device_acpuclk.dev.platform_data;
+ pdata->uses_pm8917 = true;
}
static void __init msm8930_cdp_init(void)
@@ -2828,6 +2831,8 @@
platform_device_register(&msm8930_device_acpuclk);
else if (cpu_is_msm8930aa())
platform_device_register(&msm8930aa_device_acpuclk);
+ else if (cpu_is_msm8930ab())
+ platform_device_register(&msm8930ab_device_acpuclk);
platform_add_devices(early_common_devices,
ARRAY_SIZE(early_common_devices));
if (socinfo_get_pmic_model() != PMIC_MODEL_PM8917)
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 3052902..ecf5ec6 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -988,6 +988,12 @@
void __init msm8960_init_fb(void)
{
+ uint32_t soc_platform_version = socinfo_get_version();
+
+
+ if (SOCINFO_VERSION_MAJOR(soc_platform_version) >= 3)
+ mdp_pdata.mdp_rev = MDP_REV_43;
+
if (cpu_is_msm8960ab())
mdp_pdata.mdp_rev = MDP_REV_44;
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 2071a55..95a157a 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -431,6 +431,8 @@
.shutdown_soc_valid_limit = 20,
.adjust_soc_low_threshold = 25,
.chg_term_ua = CHG_TERM_MA * 1000,
+ .normal_voltage_calc_ms = 20000,
+ .low_voltage_calc_ms = 1000,
};
#define PM8921_LC_LED_MAX_CURRENT 4 /* I = 4mA */
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index f9e2c8e..397411d 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -382,7 +382,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 9efc60a..b9b3a1f 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1676,7 +1676,7 @@
.reg_base_addr = MSM_SAW0_BASE,
.reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x1F,
#if defined(CONFIG_MSM_AVS_HW)
- .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x50589464,
+ .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x58589464,
.reg_init_values[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x00020000,
#endif
.reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01,
@@ -1691,7 +1691,7 @@
.reg_base_addr = MSM_SAW1_BASE,
.reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x1F,
#if defined(CONFIG_MSM_AVS_HW)
- .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x50589464,
+ .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x58589464,
.reg_init_values[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x00020000,
#endif
.reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01,
@@ -2890,8 +2890,18 @@
kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0);
/* 8960PRO nominal clock rate is 320Mhz */
kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 320000000;
+
+ /*
+	 * If this is an A320 GPU device (MSM8960AB), then
+ * switch the resource table to 8960AB, to reflect the
+ * separate register and shader memory mapping used in A320.
+ */
+
+ msm_kgsl_3d0.num_resources = kgsl_num_resources_8960ab;
+ msm_kgsl_3d0.resource = kgsl_3d0_resources_8960ab;
} else {
kgsl_3d0_pdata->iommu_count = 1;
+
if (SOCINFO_VERSION_MAJOR(soc_platform_version) == 1) {
kgsl_3d0_pdata->pwrlevel[0].gpu_freq = 320000000;
kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 266667000;
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index b092a53..c47b688 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -71,10 +71,17 @@
.paddr_to_memtype = msm8974_paddr_to_memtype,
};
-static void __init msm8974_early_memory(void)
+void __init msm_8974_reserve(void)
{
reserve_info = &msm8974_reserve_info;
of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
+ msm_reserve();
+}
+
+static void __init msm8974_early_memory(void)
+{
+ reserve_info = &msm8974_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
}
#define BIMC_BASE 0xfc380000
@@ -389,7 +396,7 @@
.handle_irq = gic_handle_irq,
.timer = &msm_dt_timer,
.dt_compat = msm8974_dt_match,
- .reserve = msm_reserve,
+ .reserve = msm_8974_reserve,
.init_very_early = msm8974_init_very_early,
.restart = msm_restart,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 35b68b1..1022616 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -621,7 +621,7 @@
#define USB_BAM_PHY_BASE 0x12502000
#define HSIC_BAM_PHY_BASE 0x12542000
#define A2_BAM_PHY_BASE 0x124C2000
-static struct usb_bam_pipe_connect msm_usb_bam_connections[MAX_BAMS][4][2] = {
+static struct usb_bam_pipe_connect msm_usb_bam_connections[MAX_BAMS][8][2] = {
[HSUSB_BAM][0][USB_TO_PEER_PERIPHERAL] = {
.src_phy_addr = USB_BAM_PHY_BASE,
.src_pipe_index = 11,
@@ -925,6 +925,9 @@
&msm_cpudai_sec_auxpcm_rx,
&msm_cpudai_sec_auxpcm_tx,
&msm_cpudai_stub,
+ &msm_cpudai_incall_music_rx,
+ &msm_cpudai_incall_record_rx,
+ &msm_cpudai_incall_record_tx,
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 9102875..686bb41 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -259,6 +259,23 @@
},
};
+static struct gpiomux_setting sdc2_card_det_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+ .dir = GPIOMUX_IN,
+};
+
+struct msm_gpiomux_config sdc2_card_det_config[] __initdata = {
+ {
+ .gpio = 66,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &sdc2_card_det_cfg,
+ [GPIOMUX_SUSPENDED] = &sdc2_card_det_cfg,
+ },
+ },
+};
+
void __init msm9625_init_gpiomux(void)
{
int rc;
@@ -277,4 +294,6 @@
ARRAY_SIZE(mdm9625_mi2s_configs));
msm_gpiomux_install(mdm9625_cdc_reset_config,
ARRAY_SIZE(mdm9625_cdc_reset_config));
+ msm_gpiomux_install(sdc2_card_det_config,
+ ARRAY_SIZE(sdc2_card_det_config));
}
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 8e8d3e7..42f3f41 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -109,6 +109,8 @@
"msm_sdcc.3", NULL),
OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
"msm-tsens", NULL),
+ OF_DEV_AUXDATA("qcom,usb-bam-msm", 0xF9A44000, \
+ "usb_bam", NULL),
{}
};
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 023ce86..a25290f 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -46,6 +46,7 @@
#include <mach/usbdiag.h>
#include <mach/msm_memtypes.h>
#include <mach/msm_serial_hs.h>
+#include <mach/msm_serial_pdata.h>
#include <mach/pmic.h>
#include <mach/socinfo.h>
#include <mach/vreg.h>
@@ -82,6 +83,10 @@
.id = -1,
};
+static struct msm_serial_platform_data msm_8625_uart1_pdata = {
+ .userid = 10,
+};
+
static struct msm_gpio qup_i2c_gpios_io[] = {
{ GPIO_CFG(60, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
"qup_scl" },
@@ -994,6 +999,7 @@
if (machine_is_msm8625_evb() || machine_is_msm8625_qrd7()
|| machine_is_msm8625_evt()
|| machine_is_qrd_skud_prime()) {
+ msm8625_device_uart1.dev.platform_data = &msm_8625_uart1_pdata;
platform_add_devices(msm8625_evb_devices,
ARRAY_SIZE(msm8625_evb_devices));
platform_add_devices(qrd3_devices,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index a4d7e61..c394982 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -6319,12 +6319,12 @@
*/
/*
* Initialize MM AHB registers: Enable the FPB clock and disable HW
- * gating on 8627, 8960 and 8930ab for all clocks. Also set VFE_AHB's
+ * gating on 8627 and 8930ab for all clocks. Also set VFE_AHB's
* FORCE_CORE_ON bit to prevent its memory from being collapsed when
* the clock is halted. The sleep and wake-up delays are set to safe
* values.
*/
- if (cpu_is_msm8627() || cpu_is_msm8960ab() || cpu_is_msm8930ab()) {
+ if (cpu_is_msm8627() || cpu_is_msm8930ab()) {
rmwreg(0x00000003, AHB_EN_REG, 0x6C000103);
writel_relaxed(0x000007F9, AHB_EN2_REG);
} else {
@@ -6342,7 +6342,7 @@
/* Initialize MM AXI registers: Enable HW gating for all clocks that
* support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
* delays to safe values. */
- if (cpu_is_msm8960ab() || (cpu_is_msm8960() &&
+ if ((cpu_is_msm8960() &&
SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
cpu_is_msm8627() || cpu_is_msm8930ab()) {
rmwreg(0x000007F9, MAXI_EN_REG, 0x0803FFFF);
@@ -6365,8 +6365,6 @@
if (cpu_is_msm8627() || cpu_is_msm8930ab())
rmwreg(0x000003C7, SAXI_EN_REG, 0x00003FFF);
- else if (cpu_is_msm8960ab())
- rmwreg(0x000001C6, SAXI_EN_REG, 0x00001DF6);
else
rmwreg(0x00003C38, SAXI_EN_REG, 0x00003FFF);
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index c0a553f..68bffa5 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -5120,6 +5120,7 @@
CLK_LOOKUP("core_clk", gcc_usb30_master_clk.c, "msm_dwc3"),
CLK_LOOKUP("utmi_clk", gcc_usb30_mock_utmi_clk.c, "msm_dwc3"),
CLK_LOOKUP("iface_clk", gcc_sys_noc_usb3_axi_clk.c, "msm_dwc3"),
+ CLK_LOOKUP("iface_clk", gcc_sys_noc_usb3_axi_clk.c, "msm_usb3"),
CLK_LOOKUP("sleep_clk", gcc_usb30_sleep_clk.c, "msm_dwc3"),
CLK_LOOKUP("sleep_a_clk", gcc_usb2a_phy_sleep_clk.c, "msm_dwc3"),
CLK_LOOKUP("sleep_b_clk", gcc_usb2b_phy_sleep_clk.c, "msm_dwc3"),
@@ -5345,10 +5346,14 @@
CLK_LOOKUP("osr_clk", audio_core_lpaif_ter_osr_clk.c, ""),
CLK_LOOKUP("ebit_clk", audio_core_lpaif_ter_ebit_clk.c, ""),
CLK_LOOKUP("ibit_clk", audio_core_lpaif_ter_ibit_clk.c, ""),
- CLK_LOOKUP("core_clk", audio_core_lpaif_quad_clk_src.c, ""),
- CLK_LOOKUP("osr_clk", audio_core_lpaif_quad_osr_clk.c, ""),
- CLK_LOOKUP("ebit_clk", audio_core_lpaif_quad_ebit_clk.c, ""),
- CLK_LOOKUP("ibit_clk", audio_core_lpaif_quad_ibit_clk.c, ""),
+ CLK_LOOKUP("core_clk", audio_core_lpaif_quad_clk_src.c,
+ "msm-dai-q6-mi2s.3"),
+ CLK_LOOKUP("osr_clk", audio_core_lpaif_quad_osr_clk.c,
+ "msm-dai-q6-mi2s.3"),
+ CLK_LOOKUP("ebit_clk", audio_core_lpaif_quad_ebit_clk.c,
+ "msm-dai-q6-mi2s.3"),
+ CLK_LOOKUP("ibit_clk", audio_core_lpaif_quad_ibit_clk.c,
+ "msm-dai-q6-mi2s.3"),
CLK_LOOKUP("pcm_clk", audio_core_lpaif_pcm0_clk_src.c,
"msm-dai-q6.4106"),
CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 33ec10a..b284168 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -472,7 +472,6 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
- .parent = &cxo_clk_src.c,
.dbg_name = "apcspll_clk_src",
.ops = &clk_ops_local_pll,
CLK_INIT(apcspll_clk_src.c),
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 14edbcf..f559629 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -2735,6 +2735,7 @@
static struct msm_dcvs_platform_data apq8064_dcvs_data = {
.sync_rules = apq8064_dcvs_sync_rules,
.num_sync_rules = ARRAY_SIZE(apq8064_dcvs_sync_rules),
+ .gpu_max_nom_khz = 320000,
};
struct platform_device apq8064_dcvs_device = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index c3be6ce..0faf500 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -669,6 +669,18 @@
.id = -1,
};
+static struct acpuclk_platform_data acpuclk_8930ab_pdata = {
+ .uses_pm8917 = false,
+};
+
+struct platform_device msm8930ab_device_acpuclk = {
+ .name = "acpuclk-8930ab",
+ .id = -1,
+ .dev = {
+ .platform_data = &acpuclk_8930ab_pdata,
+ },
+};
+
static struct fs_driver_data gfx3d_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk", .reset_rate = 27000000 },
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index a839fcf..c59461a 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3246,7 +3246,30 @@
};
#endif
-static struct resource kgsl_3d0_resources[] = {
+struct resource kgsl_3d0_resources_8960ab[] = {
+ {
+ .name = KGSL_3D0_REG_MEMORY,
+ .start = 0x04300000, /* GFX3D address */
+ .end = 0x0430ffff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = KGSL_3D0_SHADER_MEMORY,
+ .start = 0x04310000, /* Shader Mem Address (8960AB) */
+ .end = 0x0431ffff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = KGSL_3D0_IRQ,
+ .start = GFX3D_IRQ,
+ .end = GFX3D_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+int kgsl_num_resources_8960ab = ARRAY_SIZE(kgsl_3d0_resources_8960ab);
+
+static struct resource kgsl_3d0_resources_8960[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
@@ -3330,8 +3353,8 @@
struct platform_device msm_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
- .num_resources = ARRAY_SIZE(kgsl_3d0_resources),
- .resource = kgsl_3d0_resources,
+ .num_resources = ARRAY_SIZE(kgsl_3d0_resources_8960),
+ .resource = kgsl_3d0_resources_8960,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 3888a4e..e55e9a7 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -562,6 +562,21 @@
.id = -1,
};
+struct platform_device msm_cpudai_incall_music_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8005,
+};
+
+struct platform_device msm_cpudai_incall_record_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8004,
+};
+
+struct platform_device msm_cpudai_incall_record_tx = {
+ .name = "msm-dai-q6",
+ .id = 0x8003,
+};
+
struct platform_device msm_i2s_cpudai0 = {
.name = "msm-dai-q6",
.id = PRIMARY_I2S_RX,
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 5296048..8fc5020 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -501,6 +501,18 @@
.exit_sleep3 = msm_gic_irq_exit_sleep3,
};
+void msm_clk_dump_debug_info(void)
+{
+ pr_info("%s: GLBL_CLK_ENA: 0x%08X\n", __func__,
+ readl_relaxed(MSM_CLK_CTL_BASE + 0x0));
+ pr_info("%s: GLBL_CLK_STATE: 0x%08X\n", __func__,
+ readl_relaxed(MSM_CLK_CTL_BASE + 0x4));
+ pr_info("%s: GRP_NS_REG: 0x%08X\n", __func__,
+ readl_relaxed(MSM_CLK_CTL_BASE + 0x84));
+ pr_info("%s: CLK_HALT_STATEB: 0x%08X\n", __func__,
+ readl_relaxed(MSM_CLK_CTL_BASE + 0x10C));
+}
+
void __init msm_pm_register_irqs(void)
{
if (cpu_is_msm8625())
@@ -1917,8 +1929,6 @@
else if (msm8625_cpu_id() == MSM8625)
msm_cpr_pdata.max_freq = 1008000;
- msm_cpr_clk_enable();
-
platform_device_register(&msm8625_vp_device);
platform_device_register(&msm8625_device_cpr);
}
@@ -2137,6 +2147,7 @@
static int msm7627a_panic_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ msm_clk_dump_debug_info();
flush_cache_all();
outer_flush_all();
return NOTIFY_DONE;
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index eae01aa..bd9ea49 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -327,6 +327,9 @@
extern struct platform_device msm_kgsl_2d0;
extern struct platform_device msm_kgsl_2d1;
+extern struct resource kgsl_3d0_resources_8960ab[];
+extern int kgsl_num_resources_8960ab;
+
extern struct platform_device msm_mipi_dsi1_device;
extern struct platform_device mipi_dsi_device;
extern struct platform_device msm_lcdc_device;
@@ -455,6 +458,7 @@
extern struct platform_device msm8x60_device_acpuclk;
extern struct platform_device msm8930_device_acpuclk;
extern struct platform_device msm8930aa_device_acpuclk;
+extern struct platform_device msm8930ab_device_acpuclk;
extern struct platform_device msm8960_device_acpuclk;
extern struct platform_device msm8960ab_device_acpuclk;
extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 9c5e52c..8b5c70f 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -465,6 +465,7 @@
char dlane_swap;
void (*dsi_pwm_cfg)(void);
char enable_wled_bl_ctrl;
+ void (*gpio_set_backlight)(int bl_level);
};
struct lvds_panel_platform_data {
@@ -598,6 +599,13 @@
void msm_map_msm8910_io(void);
void msm8910_init_irq(void);
+/* Dump debug info (states, rate, etc) of clocks */
+#if defined(CONFIG_ARCH_MSM7X27)
+void msm_clk_dump_debug_info(void);
+#else
+static inline void msm_clk_dump_debug_info(void) {}
+#endif
+
struct mmc_platform_data;
int msm_add_sdcc(unsigned int controller,
struct mmc_platform_data *plat);
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 57b4bd3..ea3fb64 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -147,6 +147,59 @@
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+enum {
+ PROC_APPS,
+ PROC_GPU,
+ PROC_MAX
+};
+
+/* Expose structure to allow kgsl iommu driver to use the same structure to
+ * communicate to GPU the addresses of the flag and turn variables.
+ */
+struct remote_iommu_petersons_spinlock {
+ uint32_t flag[PROC_MAX];
+ uint32_t turn;
+};
+
+#ifdef CONFIG_MSM_IOMMU
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+ return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+#endif
+
+#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
+void msm_iommu_remote_p0_spin_lock(void);
+void msm_iommu_remote_p0_spin_unlock(void);
+
+#define msm_iommu_remote_lock_init() _msm_iommu_remote_spin_lock_init()
+#define msm_iommu_remote_spin_lock() msm_iommu_remote_p0_spin_lock()
+#define msm_iommu_remote_spin_unlock() msm_iommu_remote_p0_spin_unlock()
+#else
+#define msm_iommu_remote_lock_init()
+#define msm_iommu_remote_spin_lock()
+#define msm_iommu_remote_spin_unlock()
+#endif
+
+/* Allows kgsl iommu driver to acquire lock */
+#define msm_iommu_lock() \
+ do { \
+ msm_iommu_mutex_lock(); \
+ msm_iommu_remote_spin_lock(); \
+ } while (0)
+
+#define msm_iommu_unlock() \
+ do { \
+ msm_iommu_remote_spin_unlock(); \
+ msm_iommu_mutex_unlock(); \
+ } while (0)
+
#ifdef CONFIG_MSM_IOMMU
/*
* Look up an IOMMU context device by its context name. NULL if none found.
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
new file mode 100644
index 0000000..dae6d3b
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -0,0 +1,732 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/sps.h>
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+ IPA_BYPASS_NAT,
+ IPA_SRC_NAT,
+ IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ */
+enum ipa_mode_type {
+ IPA_BASIC,
+ IPA_ENABLE_FRAMING_HDLC,
+ IPA_ENABLE_DEFRAMING_HDLC,
+ IPA_DMA,
+};
+
+/**
+ * enum ipa_aggr_en_type - aggregation setting type in IPA
+ * end-point
+ */
+enum ipa_aggr_en_type {
+ IPA_BYPASS_AGGR,
+ IPA_ENABLE_AGGR,
+ IPA_ENABLE_DEAGGR,
+};
+
+/**
+ * enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+ IPA_MBIM_16,
+ IPA_MBIM_32,
+ IPA_TLP,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+ IPA_MBIM,
+ IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+ IPA_RECEIVE,
+ IPA_WRITE_DONE,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en: This defines the default NAT mode for the pipe: in case of
+ * filter miss - the default NAT mode defines the NATing operation
+ * on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+ enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ * @hdr_len: Header length in bytes to be added/removed. Assuming header len
+ * is constant per endpoint. Valid for both Input and Output Pipes
+ * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no
+ * metadata within header.
+ * 1: Metadata_Ofst value is valid, i.e., metadata
+ * within header is in offset Metadata_Ofst Valid
+ * for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_ofst_metadata: Offset within header in which metadata resides
+ * Size of metadata - 4bytes
+ * Example - Stream ID/SSID/mux ID.
+ * Valid for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_additional_const_len: Defines the constant length that should be added
+ * to the payload length in order for IPA to update
+ * correctly the length field within the header
+ * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no
+ * length field within the inserted header
+ * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a
+ * packet length field resides within the header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size: Offset within header in which packet size reside. Upon
+ * Header Insertion, IPA will update this field within the
+ *	header with the packet length. Assumption is that
+ * header length field size is constant and is 2Bytes
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet.
+ * This bit is valid only when Hdr_En=01(Header Insertion)
+ * SW should set this bit for IPA-to-A5 pipes.
+ * 0: Do not insert A5 Mux Header
+ * 1: Insert A5 Mux Header
+ * Valid for Output Pipes (IPA Producer)
+ */
+struct ipa_ep_cfg_hdr {
+ u32 hdr_len;
+ u32 hdr_ofst_metadata_valid;
+ u32 hdr_ofst_metadata;
+ u32 hdr_additional_const_len;
+ u32 hdr_ofst_pkt_size_valid;
+ u32 hdr_ofst_pkt_size;
+ u32 hdr_a5_mux;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode: Valid for Input Pipes only (IPA Consumer)
+ * @dst: This parameter specifies the output pipe to which the packets
+ * will be routed to.
+ * This parameter is valid for Mode=DMA and not valid for
+ * Mode=Basic
+ * Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+ enum ipa_mode_type mode;
+ enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ * @aggr_en: Valid for both Input and Output Pipes
+ * @aggr: Valid for both Input and Output Pipes
+ * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB) When set
+ * to 0, there is no size limitation on the aggregation.
+ * When both, Aggr_Byte_Limit and Aggr_Time_Limit are set
+ * to 0, there is no aggregation, every packet is sent
+ * independently according to the aggregation structure
+ *	Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit: Timer to close aggregated packet (<=32ms) When set to 0,
+ * there is no time limitation on the aggregation. When
+ * both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ * there is no aggregation, every packet is sent
+ * independently according to the aggregation structure
+ * Valid for Output Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_aggr {
+ enum ipa_aggr_en_type aggr_en;
+ enum ipa_aggr_type aggr;
+ u32 aggr_byte_limit;
+ u32 aggr_time_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl: Defines the default routing table index to be used in case there
+ * is no filter rule matching, valid for Input Pipes only (IPA
+ * Consumer). Clients should set this to 0 which will cause default
+ * v4 and v6 routes setup internally by IPA driver to be used for
+ * this end-point
+ */
+struct ipa_ep_cfg_route {
+ u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat:	NAT parameters
+ * @hdr: Header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @route: Routing parameters
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_route route;
+};
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * client allocates the data and desc FIFO and specifies that in data+desc OR
+ * specifies sizes and pipe_mem pref and IPA does the allocation.
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @client_bam_hdl: client SPS handle
+ * @client_ep_idx: client PER EP index
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie evt - type of event data - data relevant
+ * to event. May not be valid. See event_type enum for valid
+ * cases.
+ * @desc_fifo_sz: size of desc FIFO
+ * @data_fifo_sz: size of data FIFO
+ * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback
+ * to sys mem if pipe mem alloc fails
+ * @desc: desc FIFO meta-data when client has allocated it
+ * @data: data FIFO meta-data when client has allocated it
+ */
+struct ipa_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 client_bam_hdl;
+ u32 client_ep_idx;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ u32 desc_fifo_sz;
+ u32 data_fifo_sz;
+ bool pipe_mem_preferred;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_sps_params - SPS related output parameters resulting from
+ * low/high level client connect
+ * @ipa_bam_hdl: IPA SPS handle
+ * @ipa_ep_idx: IPA PER EP index
+ * @desc: desc FIFO meta-data
+ * @data: data FIFO meta-data
+ */
+struct ipa_sps_params {
+ u32 ipa_bam_hdl;
+ u32 ipa_ep_idx;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props: number of tx properties
+ * @prop: the tx properties array
+ */
+struct ipa_tx_intf {
+ u32 num_props;
+ struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props: number of rx properties
+ * @prop: the rx properties array
+ */
+struct ipa_rx_intf {
+ u32 num_props;
+ struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ */
+struct ipa_sys_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 desc_fifo_sz;
+ void *priv;
+ void (*notify)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+};
+
+/**
+ * struct ipa_msg_meta_wrapper - message meta-data wrapper
+ * @meta: the meta-data itself
+ * @link: opaque to client
+ * @meta_wrapper_free: function to free the metadata wrapper when IPA driver
+ * is done with it
+ */
+struct ipa_msg_meta_wrapper {
+ struct ipa_msg_meta meta;
+ struct list_head link;
+ void (*meta_wrapper_free)(struct ipa_msg_meta_wrapper *buff);
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @mbim_stream_id: the stream ID used in NDP signature
+ * @mbim_stream_id_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+ u8 mbim_stream_id;
+ bool mbim_stream_id_valid;
+};
+
+/**
+ * struct ipa_msg_wrapper - message wrapper
+ * @msg: the message buffer itself, MUST exist after call returns, will
+ * be freed by IPA driver when it is done with it
+ * @link: opaque to client
+ * @msg_free: function to free the message when IPA driver is done with it
+ * @msg_wrapper_free: function to free the message wrapper when IPA driver is
+ * done with it
+ */
+struct ipa_msg_wrapper {
+ void *msg;
+ struct list_head link;
+ void (*msg_free)(void *msg);
+ void (*msg_wrapper_free)(struct ipa_msg_wrapper *buff);
+};
+
+/**
+ * typedef ipa_pull_fn - callback function
+ * @buf - [in] the buffer to populate the message into
+ * @sz - [in] the size of the buffer
+ *
+ * callback function registered by kernel client with IPA driver for IPA driver
+ * to be able to pull messages from the kernel client asynchronously.
+ *
+ * Returns how many bytes were copied into the buffer, negative on failure.
+ */
+typedef int (*ipa_pull_fn)(void *buf, uint16_t sz);
+
+#ifdef CONFIG_IPA
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * rmnet bridge
+ */
+int rmnet_bridge_init(void);
+
+int rmnet_bridge_disconnect(void);
+
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+#else
+
+/*
+ * Connect / Disconnect
+ */
+static inline int ipa_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disconnect(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep(u32 clnt_hdl,
+ const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_nat(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_mode(u32 clnt_hdl,
+ const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_aggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_route(u32 clnt_hdl,
+ const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Header removal / addition
+ */
+static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_commit_hdr(void)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_reset_hdr(void)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Filtering
+ */
+static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+
+/*
+ * NAT
+ */
+static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ return -EPERM;
+}
+
+
+/*
+ * rmnet bridge
+ */
+static inline int rmnet_bridge_init(void)
+{
+ return -EPERM;
+}
+
+
+static inline int rmnet_bridge_disconnect(void)
+{
+ return -EPERM;
+}
+
+
+static inline int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+
+/*
+ * System pipes
+ */
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+
+#endif /* CONFIG_IPA */
+
+#endif /* _IPA_H_ */
diff --git a/arch/arm/mach-msm/include/mach/irqs-8226.h b/arch/arm/mach-msm/include/mach/irqs-8226.h
index 7e174b9..72602b1 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8226.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8226.h
@@ -32,7 +32,7 @@
#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
#define NR_MSM_IRQS 256
-#define NR_GPIO_IRQS 146
+#define NR_GPIO_IRQS 117
#define NR_QPNP_IRQS 32768 /* SPARSE_IRQ is required to support this */
#define NR_BOARD_IRQS NR_QPNP_IRQS
#define NR_TLMM_MSM_DIR_CONN_IRQ 8
diff --git a/arch/arm/mach-msm/include/mach/irqs-9625.h b/arch/arm/mach-msm/include/mach/irqs-9625.h
index abafc23..b1f65d1 100644
--- a/arch/arm/mach-msm/include/mach/irqs-9625.h
+++ b/arch/arm/mach-msm/include/mach/irqs-9625.h
@@ -21,6 +21,10 @@
* 32+: SPI (shared peripheral interrupts)
*/
+#define GIC_PPI_START 16
+
+#define INT_ARMQC_PERFMON (GIC_PPI_START + 7)
+
#define GIC_SPI_START 32
#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 7aff770..8d96192 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -19,8 +19,6 @@
#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31))
-#include "irqs-8625.h"
-
#if defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_APQ8064) || \
defined(CONFIG_ARCH_MSM8930)
@@ -78,7 +76,8 @@
#elif defined(CONFIG_ARCH_MSM8X60)
#include "irqs-8x60.h"
#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7X25) \
- || defined(CONFIG_ARCH_MSM7X27)
+ || defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM8625)
+#include "irqs-8625.h"
#include "irqs-7xxx.h"
#define NR_GPIO_IRQS 133
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index a51cc46..f07a9e8 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -27,6 +27,7 @@
(val*1000*1000U)
#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory"
+#define KGSL_3D0_SHADER_MEMORY "kgsl_3d0_shader_memory"
#define KGSL_3D0_IRQ "kgsl_3d0_irq"
#define KGSL_2D0_REG_MEMORY "kgsl_2d0_reg_memory"
#define KGSL_2D0_IRQ "kgsl_2d0_irq"
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index acfbe4a..d089924 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -98,26 +98,25 @@
#define finish_arch_switch(prev) do { store_ttbr0(); } while (0)
#endif
+#define MAX_HOLE_ADDRESS (PHYS_OFFSET + 0x10000000)
+extern unsigned long memory_hole_offset;
+extern unsigned long memory_hole_start;
+extern unsigned long memory_hole_end;
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-extern unsigned long membank0_size;
-extern unsigned long membank1_start;
-void find_membank0_hole(void);
+void find_memory_hole(void);
-#define MEMBANK0_PHYS_OFFSET PHYS_OFFSET
-#define MEMBANK0_PAGE_OFFSET PAGE_OFFSET
-
-#define MEMBANK1_PHYS_OFFSET (membank1_start)
-#define MEMBANK1_PAGE_OFFSET (MEMBANK0_PAGE_OFFSET + (membank0_size))
+#define MEM_HOLE_END_PHYS_OFFSET (memory_hole_end)
+#define MEM_HOLE_PAGE_OFFSET (PAGE_OFFSET + memory_hole_offset)
#define __phys_to_virt(phys) \
- ((MEMBANK1_PHYS_OFFSET && ((phys) >= MEMBANK1_PHYS_OFFSET)) ? \
- (phys) - MEMBANK1_PHYS_OFFSET + MEMBANK1_PAGE_OFFSET : \
- (phys) - MEMBANK0_PHYS_OFFSET + MEMBANK0_PAGE_OFFSET)
+ ((MEM_HOLE_END_PHYS_OFFSET && ((phys) >= MEM_HOLE_END_PHYS_OFFSET)) ? \
+ (phys) - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET : \
+ (phys) - PHYS_OFFSET + PAGE_OFFSET)
#define __virt_to_phys(virt) \
- ((MEMBANK1_PHYS_OFFSET && ((virt) >= MEMBANK1_PAGE_OFFSET)) ? \
- (virt) - MEMBANK1_PAGE_OFFSET + MEMBANK1_PHYS_OFFSET : \
- (virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
+ ((MEM_HOLE_END_PHYS_OFFSET && ((virt) >= MEM_HOLE_PAGE_OFFSET)) ? \
+ (virt) - MEM_HOLE_PAGE_OFFSET + MEM_HOLE_END_PHYS_OFFSET : \
+ (virt) - PAGE_OFFSET + PHYS_OFFSET)
#endif
/*
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index 84a7dc0..ab0e72f 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -29,6 +29,7 @@
unsigned int len;
int ahb;
const char *fabclk[NUM_CTX];
+ const char *iface_clk;
unsigned int offset;
unsigned int haltid;
unsigned int rpm_enabled;
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs.h b/arch/arm/mach-msm/include/mach/msm_dcvs.h
index c29b57a..2ad7d22 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs.h
@@ -44,6 +44,7 @@
struct msm_dcvs_platform_data {
struct msm_dcvs_sync_rule *sync_rules;
unsigned num_sync_rules;
+ unsigned long gpu_max_nom_khz;
};
struct msm_gov_platform_data {
@@ -154,4 +155,23 @@
* Update the frequency known to dcvs when the limits are changed.
*/
extern void msm_dcvs_update_limits(int dcvs_core_id);
+
+/**
+ * msm_dcvs_apply_gpu_floor
+ * @cpu_freq: CPU frequency to compare to GPU sync rules
+ *
+ * Apply a GPU floor frequency if the corresponding CPU frequency,
+ * or the number of CPUs online, requires it.
+ */
+extern void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq);
+
+/**
+ * msm_dcvs_update_algo_params
+ * @return:
+ * 0 on success, < 0 on error
+ *
+ * Updates the DCVS algorithm with parameters depending on the
+ * number of CPUs online.
+ */
+extern int msm_dcvs_update_algo_params(void);
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
index c03b513..ab43a8a 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
@@ -37,9 +37,12 @@
#define MSM8226_TLMM_PHYS 0xFD510000
#define MSM8226_TLMM_SIZE SZ_16K
-#define MSM8226_IMEM_PHYS 0xFC42B000
+#define MSM8226_IMEM_PHYS 0xFE805000
#define MSM8226_IMEM_SIZE SZ_4K
+#define MSM8226_MPM2_PSHOLD_PHYS 0xFC4AB000
+#define MSM8226_MPM2_PSHOLD_SIZE SZ_4K
+
#ifdef CONFIG_DEBUG_MSM8226_UART
#define MSM_DEBUG_UART_BASE IOMEM(0xFA71E000)
#define MSM_DEBUG_UART_PHYS 0xF991E000
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 5ca5861..80e454a 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -68,6 +68,8 @@
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
int depth, void *data);
-
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+ int depth, void *data);
+void adjust_meminfo(unsigned long start, unsigned long size);
unsigned long __init reserve_memory_for_fmem(unsigned long, unsigned long);
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_serial_pdata.h b/arch/arm/mach-msm/include/mach/msm_serial_pdata.h
index 4153cb2..40bdc9d 100644
--- a/arch/arm/mach-msm/include/mach/msm_serial_pdata.h
+++ b/arch/arm/mach-msm/include/mach/msm_serial_pdata.h
@@ -10,8 +10,8 @@
* GNU General Public License for more details.
*/
-#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
-#define __ASM_ARCH_MSM_SERIAL_HS_H
+#ifndef __ASM_ARCH_MSM_SERIAL_H
+#define __ASM_ARCH_MSM_SERIAL_H
#include <linux/serial_core.h>
@@ -22,6 +22,7 @@
/* bool: inject char into rx tty on wakeup */
unsigned char inject_rx_on_wakeup;
char rx_to_inject;
+ int userid;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 11d3014..ab5271f 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2009, 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,10 @@
void (*gpio_release)(void);
int (*dma_config)(void);
const char *rsl_id;
- uint32_t pm_lat;
- uint32_t infinite_mode;
+ u32 pm_lat;
+ u32 infinite_mode;
+ bool ver_reg_exists;
+ bool use_bam;
+ u32 bam_consumer_pipe_index;
+ u32 bam_producer_pipe_index;
};
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index cb8aae0..6124cd6 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -22,7 +22,7 @@
/* Maximum number of slots in DM */
#define OCMEM_MAX_CHUNKS 32
-#define MIN_CHUNK_SIZE 128
+#define MIN_CHUNK_SIZE SZ_4K
struct ocmem_notifier;
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 34bdc79..0499a7a 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -111,6 +111,7 @@
MSM_CPU_8092,
MSM_CPU_8226,
MSM_CPU_8910,
+ MSM_CPU_8625Q,
};
enum pmic_model {
@@ -447,6 +448,18 @@
#endif
}
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
static inline int soc_class_is_msm8960(void)
{
return cpu_is_msm8960() || cpu_is_msm8960ab();
diff --git a/arch/arm/mach-msm/include/mach/sps.h b/arch/arm/mach-msm/include/mach/sps.h
index 5333c2e..a000c3e 100644
--- a/arch/arm/mach-msm/include/mach/sps.h
+++ b/arch/arm/mach-msm/include/mach/sps.h
@@ -160,6 +160,8 @@
SPS_O_AUTO_ENABLE = 0x20000000,
/* DISABLE endpoint synchronization for config/enable/disable */
SPS_O_NO_EP_SYNC = 0x40000000,
+ /* Allow partial polling during IRQ mode */
+ SPS_O_HYBRID = 0x80000000,
};
/**
diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h
index 47313a7..5e1ef6f 100644
--- a/arch/arm/mach-msm/include/mach/usb_bam.h
+++ b/arch/arm/mach-msm/include/mach/usb_bam.h
@@ -13,6 +13,7 @@
#ifndef _USB_BAM_H_
#define _USB_BAM_H_
#include "sps.h"
+#include <mach/ipa.h>
/**
* SPS Pipes direction.
@@ -27,6 +28,22 @@
PEER_PERIPHERAL_TO_USB,
};
+struct usb_bam_connect_ipa_params {
+ u8 idx;
+ u32 *src_pipe;
+ u32 *dst_pipe;
+ enum usb_bam_pipe_dir dir;
+ /* client handle assigned by IPA to client */
+ u32 prod_clnt_hdl;
+ u32 cons_clnt_hdl;
+ /* params assigned by the CD */
+ enum ipa_client_type client;
+ struct ipa_ep_cfg ipa_ep_cfg;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+};
+
#ifdef CONFIG_USB_BAM
/**
* Connect USB-to-Periperal SPS connection.
@@ -47,6 +64,31 @@
int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx);
/**
+ * Connect USB-to-IPA SPS connection.
+ *
+ * This function returns the allocated pipe numbers and clnt handles.
+ *
+ * @ipa_params - in/out parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int usb_bam_connect_ipa(struct usb_bam_connect_ipa_params *ipa_params);
+
+/**
+ * Disconnect USB-to-IPA SPS connection.
+ *
+ * @idx - Connection index.
+ *
+ * @ipa_params - in/out parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int usb_bam_disconnect_ipa(u8 idx,
+ struct usb_bam_connect_ipa_params *ipa_params);
+
+/**
* Register a wakeup callback from peer BAM.
*
* @idx - Connection index.
@@ -96,6 +138,18 @@
return -ENODEV;
}
+static inline int usb_bam_connect_ipa(
+ struct usb_bam_connect_ipa_params *ipa_params)
+{
+ return -ENODEV;
+}
+
+static inline int usb_bam_disconnect_ipa(u8 idx,
+ struct usb_bam_connect_ipa_params *ipa_params)
+{
+ return -ENODEV;
+}
+
static inline int usb_bam_register_wake_cb(u8 idx,
int (*callback)(void *), void* param)
{
diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
index be11989..41dac62 100644
--- a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
+++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
@@ -21,12 +21,13 @@
USB_GADGET_XPORT_SMD,
USB_GADGET_XPORT_BAM,
USB_GADGET_XPORT_BAM2BAM,
+ USB_GADGET_XPORT_BAM2BAM_IPA,
USB_GADGET_XPORT_HSIC,
USB_GADGET_XPORT_HSUART,
USB_GADGET_XPORT_NONE,
};
-#define XPORT_STR_LEN 10
+#define XPORT_STR_LEN 12
static char *xport_to_str(enum transport_type t)
{
@@ -41,6 +42,8 @@
return "BAM";
case USB_GADGET_XPORT_BAM2BAM:
return "BAM2BAM";
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ return "BAM2BAM_IPA";
case USB_GADGET_XPORT_HSIC:
return "HSIC";
case USB_GADGET_XPORT_HSUART:
@@ -64,6 +67,8 @@
return USB_GADGET_XPORT_BAM;
if (!strncasecmp("BAM2BAM", name, XPORT_STR_LEN))
return USB_GADGET_XPORT_BAM2BAM;
+ if (!strncasecmp("BAM2BAM_IPA", name, XPORT_STR_LEN))
+ return USB_GADGET_XPORT_BAM2BAM_IPA;
if (!strncasecmp("HSIC", name, XPORT_STR_LEN))
return USB_GADGET_XPORT_HSIC;
if (!strncasecmp("HSUART", name, XPORT_STR_LEN))
diff --git a/arch/arm/mach-msm/include/mach/usbdiag.h b/arch/arm/mach-msm/include/mach/usbdiag.h
index d1e3605..d9320c3 100644
--- a/arch/arm/mach-msm/include/mach/usbdiag.h
+++ b/arch/arm/mach-msm/include/mach/usbdiag.h
@@ -1,6 +1,6 @@
/* include/asm-arm/arch-msm/usbdiag.h
*
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2010, 2012, The Linux Foundation. All rights reserved.
*
* All source code in this file is licensed under the following license except
* where indicated.
@@ -21,8 +21,11 @@
#ifndef _DRIVERS_USB_DIAG_H_
#define _DRIVERS_USB_DIAG_H_
+#include <linux/err.h>
+
#define DIAG_LEGACY "diag"
#define DIAG_MDM "diag_mdm"
+#define DIAG_QSC "diag_qsc"
#define USB_DIAG_CONNECT 0
#define USB_DIAG_DISCONNECT 1
@@ -45,6 +48,7 @@
void *priv_usb;
};
+#ifdef CONFIG_USB_G_ANDROID
struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
void (*notify)(void *, unsigned, struct diag_request *));
void usb_diag_close(struct usb_diag_ch *ch);
@@ -52,7 +56,32 @@
void usb_diag_free_req(struct usb_diag_ch *ch);
int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req);
int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req);
-
-int diag_read_from_cb(unsigned char * , int);
-
+#else
+static inline struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+ void (*notify)(void *, unsigned, struct diag_request *))
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void usb_diag_close(struct usb_diag_ch *ch)
+{
+}
+static inline
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+ return -ENODEV;
+}
+static inline void usb_diag_free_req(struct usb_diag_ch *ch)
+{
+}
+static inline
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ return -ENODEV;
+}
+static inline
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_USB_G_ANDROID */
#endif /* _DRIVERS_USB_DIAG_H_ */
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 52bb8ef..cd70ae9 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -519,6 +519,7 @@
MSM_CHIP_DEVICE(APCS_GCC, MSM8226),
MSM_CHIP_DEVICE(TLMM, MSM8226),
MSM_CHIP_DEVICE(IMEM, MSM8226),
+ MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM8226),
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
.length = MSM_SHARED_RAM_SIZE,
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 3fe65b8..9cc2a9d 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -27,6 +27,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
+#include <mach/memory.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
@@ -365,7 +366,7 @@
return ret;
}
-static int check_for_compat(unsigned long node)
+static int __init check_for_compat(unsigned long node)
{
char **start = __compat_exports_start;
@@ -454,6 +455,79 @@
return 0;
}
+/* This function scans the device tree to populate the memory hole table */
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ unsigned int *memory_remove_prop;
+ unsigned long memory_remove_prop_length;
+ unsigned long hole_start;
+ unsigned long hole_size;
+
+ memory_remove_prop = of_get_flat_dt_prop(node,
+ "qcom,memblock-remove",
+ &memory_remove_prop_length);
+
+ if (memory_remove_prop) {
+ if (!check_for_compat(node))
+ goto out;
+ } else {
+ goto out;
+ }
+
+ if (memory_remove_prop) {
+ if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+ WARN(1, "Memory remove malformed\n");
+ goto out;
+ }
+
+ hole_start = be32_to_cpu(memory_remove_prop[0]);
+ hole_size = be32_to_cpu(memory_remove_prop[1]);
+
+ if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
+ if (memory_hole_start == 0 && memory_hole_end == 0) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start + hole_size;
+ } else if ((memory_hole_end - memory_hole_start)
+ <= hole_size) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start + hole_size;
+ }
+ }
+ adjust_meminfo(hole_start, hole_size);
+ }
+
+out:
+ return 0;
+}
+
+/*
+ * Split the memory bank to reflect the hole, if present,
+ * using the start and end of the memory hole.
+ */
+void adjust_meminfo(unsigned long start, unsigned long size)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[j];
+ *bank = meminfo.bank[i];
+
+ if (((start + size) <= (bank->start + bank->size)) &&
+ (start >= bank->start)) {
+ memmove(bank + 1, bank,
+ (meminfo.nr_banks - i) * sizeof(*bank));
+ meminfo.nr_banks++;
+ i++;
+ bank[1].size -= (start + size);
+ bank[1].start = (start + size);
+ bank[1].highmem = 0;
+ j++;
+ bank->size = start - bank->start;
+ }
+ j++;
+ }
+}
unsigned long get_ddr_size(void)
{
unsigned int i;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index e0ab983..ea17efe 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -485,7 +485,7 @@
};
#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000220)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
enum bimc_m_priolvl_override {
M_PRIOLVL_OVERRIDE_RMSK = 0x301,
M_PRIOLVL_OVERRIDE_BMSK = 0x300,
@@ -495,10 +495,10 @@
};
#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
enum bimc_m_read_command_override {
- M_RD_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x300000,
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
@@ -529,13 +529,15 @@
};
#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
enum bimc_m_write_command_override {
- M_WR_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x30000,
- M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x10,
- M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x7000,
- M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
@@ -544,8 +546,10 @@
M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x20,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
@@ -1454,7 +1458,7 @@
* boundary in future
*/
wmb();
- set_qos_mode(binfo->base, mas_index, 1, 1, 1);
+ set_qos_mode(binfo->base, mas_index, 0, 1, 1);
break;
case BIMC_QOS_MODE_BYPASS:
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
index f0f5cd8..dbfa5ec 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -642,6 +642,8 @@
.qport = qports_crypto_c0,
.mas_hw_id = MAS_CRYPTO_CORE0,
.hw_sel = MSM_BUS_NOC,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_CRYPTO_CORE1,
@@ -653,6 +655,8 @@
.qport = qports_crypto_c1,
.mas_hw_id = MAS_CRYPTO_CORE1,
.hw_sel = MSM_BUS_NOC,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_LPASS_PROC,
@@ -722,6 +726,7 @@
.prio_rd = 2,
.prio_wr = 2,
.hw_sel = MSM_BUS_NOC,
+ .iface_clk_node = "msm_usb3",
},
{
.id = MSM_BUS_SLAVE_AMPSS,
@@ -806,7 +811,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_oxili,
@@ -819,7 +824,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.qport = qports_gemini,
.ws = 10000,
@@ -832,7 +837,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.qport = qports_mdp,
.ws = 10000,
@@ -845,7 +850,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_venus_p0,
@@ -858,7 +863,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_venus_p1,
@@ -871,7 +876,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_vfe,
@@ -1049,9 +1054,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
- .prio_lvl = 0,
- .prio_rd = 2,
- .prio_wr = 2,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_AMPSS_M1,
@@ -1064,6 +1068,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_MSS_PROC,
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 12d6862..2c6efb8 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -71,6 +71,7 @@
int hw_sel;
const char *slaveclk[NUM_CTX];
const char *memclk[NUM_CTX];
+ const char *iface_clk_node;
unsigned int buswidth;
unsigned int ws;
unsigned int mode;
@@ -117,6 +118,7 @@
int commit_index;
struct nodeclk nodeclk[NUM_CTX];
struct nodeclk memclk[NUM_CTX];
+ struct nodeclk iface_clk;
void *hw_data;
};
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 7169440..b6870c6 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -175,6 +175,15 @@
}
}
+ if (info->node_info->iface_clk_node) {
+ info->iface_clk.clk = clk_get_sys(info->node_info->
+ iface_clk_node, "iface_clk");
+ if (IS_ERR(info->iface_clk.clk)) {
+ MSM_BUS_ERR("ERR: Couldn't get clk %s\n",
+ info->node_info->iface_clk_node);
+ }
+ }
+
ret = info->node_info->gateway ?
msm_bus_fabric_add_fab(fabric, info) :
msm_bus_fabric_add_node(fabric, info);
@@ -187,6 +196,12 @@
if (fabric->fabdev.hw_algo.node_init == NULL)
continue;
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Enabled iface clock for node init: %d\n",
+ info->node_info->priv_id);
+ clk_prepare_enable(info->iface_clk.clk);
+ }
+
for (j = 0; j < NUM_CTX; j++)
clk_prepare_enable(fabric->info.nodeclk[j].clk);
@@ -198,6 +213,14 @@
for (j = 0; j < NUM_CTX; j++)
clk_disable_unprepare(fabric->info.nodeclk[j].clk);
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Disable iface_clk after node init: %d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+
+
}
MSM_BUS_DBG("Fabric: %d nmasters: %d nslaves: %d\n"
@@ -355,14 +378,35 @@
return;
}
+ /* Enable clocks before accessing QoS registers */
for (i = 0; i < NUM_CTX; i++)
clk_prepare_enable(fabric->info.nodeclk[i].clk);
+ if (info->iface_clk.clk)
+ clk_prepare_enable(info->iface_clk.clk);
+
+ if (hop->iface_clk.clk)
+ clk_prepare_enable(hop->iface_clk.clk);
+
fabdev->hw_algo.update_bw(hop, info, fabric->pdata, sel_cdata,
master_tiers, add_bw);
+
+ /* Disable clocks after accessing QoS registers */
for (i = 0; i < NUM_CTX; i++)
clk_disable_unprepare(fabric->info.nodeclk[i].clk);
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Commented: Will disable clock for info: %d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+
+ if (hop->iface_clk.clk) {
+ MSM_BUS_DBG("Commented Will disable clock for hop: %d\n",
+ hop->node_info->priv_id);
+ clk_disable_unprepare(hop->iface_clk.clk);
+ }
+
fabric->arb_dirty = true;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index fb2e5da..9e89256 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -362,16 +362,18 @@
}
for (i = 0; i < info->node_info->num_mports; i++) {
- if (info->node_info->mode != NOC_QOS_MODE_BYPASS)
+ if (info->node_info->mode != NOC_QOS_MODE_BYPASS) {
noc_set_qos_priority(ninfo, info->node_info->qport[i],
prio);
- if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
- struct msm_bus_noc_qos_bw qbw;
- qbw.ws = info->node_info->ws;
- qbw.bw = 0;
- msm_bus_noc_set_qos_bw(ninfo, info->node_info->qport[i],
- info->node_info->perm_mode, &qbw);
+ if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
+ struct msm_bus_noc_qos_bw qbw;
+ qbw.ws = info->node_info->ws;
+ qbw.bw = 0;
+ msm_bus_noc_set_qos_bw(ninfo, info->node_info->
+ qport[i], info->node_info->perm_mode,
+ &qbw);
+ }
}
noc_set_qos_mode(ninfo, info->node_info->qport[i], info->
diff --git a/arch/arm/mach-msm/msm_cpr.c b/arch/arm/mach-msm/msm_cpr.c
index b68a8db..c7a8b98 100644
--- a/arch/arm/mach-msm/msm_cpr.c
+++ b/arch/arm/mach-msm/msm_cpr.c
@@ -906,6 +906,14 @@
return -ENOMEM;
}
+ /* enable clk for cpr */
+ if (!pdata->clk_enable) {
+ pr_err("CPR: Invalid clk_enable hook\n");
+ return -EFAULT;
+ }
+
+ pdata->clk_enable();
+
/* Initialize platform_data */
cpr->config = pdata;
diff --git a/arch/arm/mach-msm/msm_dcvs.c b/arch/arm/mach-msm/msm_dcvs.c
index 2736870..41afd24 100644
--- a/arch/arm/mach-msm/msm_dcvs.c
+++ b/arch/arm/mach-msm/msm_dcvs.c
@@ -150,6 +150,8 @@
static unsigned num_cpu_freqs;
static struct msm_dcvs_platform_data *dcvs_pdata;
+static DEFINE_MUTEX(gpu_floor_mutex);
+
static void force_stop_slack_timer(struct dcvs_core *core)
{
unsigned long flags;
@@ -256,33 +258,49 @@
spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
}
-static void apply_gpu_floor(int cpu_freq)
+void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq)
{
- int i;
- int gpu_floor_freq = 0;
+ static unsigned long curr_cpu0_freq;
+ unsigned long gpu_floor_freq = 0;
struct dcvs_core *gpu;
+ int i;
if (!dcvs_pdata)
return;
+ mutex_lock(&gpu_floor_mutex);
+
+ if (cpu_freq)
+ curr_cpu0_freq = cpu_freq;
+
for (i = 0; i < dcvs_pdata->num_sync_rules; i++)
- if (cpu_freq > dcvs_pdata->sync_rules[i].cpu_khz) {
+ if (curr_cpu0_freq > dcvs_pdata->sync_rules[i].cpu_khz) {
gpu_floor_freq =
dcvs_pdata->sync_rules[i].gpu_floor_khz;
break;
}
- if (!gpu_floor_freq)
+ if (num_online_cpus() > 1)
+ gpu_floor_freq = max(gpu_floor_freq,
+ dcvs_pdata->gpu_max_nom_khz);
+
+ if (!gpu_floor_freq) {
+ mutex_unlock(&gpu_floor_mutex);
return;
+ }
for (i = GPU_OFFSET; i < CORES_MAX; i++) {
gpu = &core_list[i];
if (gpu->dcvs_core_id == -1)
continue;
- if (gpu->set_floor_frequency)
+
+ if (gpu->pending_freq != STOP_FREQ_CHANGE &&
+ gpu->set_floor_frequency)
gpu->set_floor_frequency(gpu->type_core_num,
gpu_floor_freq);
}
+
+ mutex_unlock(&gpu_floor_mutex);
}
static int __msm_dcvs_change_freq(struct dcvs_core *core)
@@ -295,21 +313,16 @@
uint32_t ret1 = 0;
spin_lock_irqsave(&core->pending_freq_lock, flags);
+ if (core->pending_freq == STOP_FREQ_CHANGE)
+ goto out;
repeat:
BUG_ON(!core->pending_freq);
- if (core->pending_freq == STOP_FREQ_CHANGE)
- BUG();
requested_freq = core->pending_freq;
time_start = core->time_start;
core->time_start = ns_to_ktime(0);
- if (requested_freq < 0) {
- requested_freq = -1 * requested_freq;
- core->pending_freq = STOP_FREQ_CHANGE;
- } else {
- core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
- }
+ core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
if (requested_freq == core->actual_freq)
goto out;
@@ -318,7 +331,7 @@
if (core->type == MSM_DCVS_CORE_TYPE_CPU &&
core->type_core_num == 0)
- apply_gpu_floor(requested_freq);
+ msm_dcvs_apply_gpu_floor(requested_freq);
/**
* Call the frequency sink driver to change the frequency
@@ -458,10 +471,7 @@
}
if (new_freq == STOP_FREQ_CHANGE) {
- if (core->pending_freq == NO_OUTSTANDING_FREQ_CHANGE)
- core->pending_freq = STOP_FREQ_CHANGE;
- else if (core->pending_freq > 0)
- core->pending_freq = -1 * core->pending_freq;
+ core->pending_freq = STOP_FREQ_CHANGE;
return;
}
@@ -537,6 +547,36 @@
return HRTIMER_NORESTART;
}
+int msm_dcvs_update_algo_params(void)
+{
+ static struct msm_dcvs_algo_param curr_params;
+ static DEFINE_MUTEX(param_update_mutex);
+ struct msm_dcvs_algo_param *new_params;
+ int cpu, ret = 0;
+
+ mutex_lock(¶m_update_mutex);
+ new_params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param;
+
+ if (memcmp(&curr_params, new_params,
+ sizeof(struct msm_dcvs_algo_param))) {
+ for_each_possible_cpu(cpu) {
+ ret = msm_dcvs_scm_set_algo_params(CPU_OFFSET + cpu,
+ new_params);
+ if (ret) {
+ pr_err("scm set algo params failed on cpu %d, ret %d\n",
+ cpu, ret);
+ mutex_unlock(¶m_update_mutex);
+ return ret;
+ }
+ }
+ memcpy(&curr_params, new_params,
+ sizeof(struct msm_dcvs_algo_param));
+ }
+
+ mutex_unlock(¶m_update_mutex);
+ return ret;
+}
+
/* Helper functions and macros for sysfs nodes for a core */
#define CORE_FROM_ATTRIBS(attr, name) \
container_of(container_of(attr, struct core_attribs, name), \
@@ -591,12 +631,9 @@
} else { \
uint32_t old_val = core->algo_param._name; \
core->algo_param._name = val; \
- ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id, \
- &core->algo_param); \
+ ret = msm_dcvs_update_algo_params(); \
if (ret) { \
core->algo_param._name = old_val; \
- __err("Error(%d) in setting %d for algo param %s\n",\
- ret, val, __stringify(_name)); \
} \
} \
return count; \
@@ -936,7 +973,6 @@
core->get_frequency = get_frequency;
core->idle_enable = idle_enable;
core->set_floor_frequency = set_floor_frequency;
- core->pending_freq = STOP_FREQ_CHANGE;
core->info = info;
if (type == MSM_DCVS_CORE_TYPE_CPU)
@@ -1102,10 +1138,18 @@
0, core->actual_freq, &freq, &ret1);
core->idle_enable(core->type_core_num,
MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+
+ if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+ mutex_lock(&gpu_floor_mutex);
+
spin_lock_irqsave(&core->pending_freq_lock, flags);
/* flush out all the pending freq changes */
request_freq_change(core, STOP_FREQ_CHANGE);
spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+ if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+ mutex_unlock(&gpu_floor_mutex);
+
force_stop_slack_timer(core);
return 0;
@@ -1230,8 +1274,10 @@
goto done;
}
- for (i = 0; i < CORES_MAX; i++)
+ for (i = 0; i < CORES_MAX; i++) {
core_list[i].dcvs_core_id = -1;
+ core_list[i].pending_freq = STOP_FREQ_CHANGE;
+ }
done:
return ret;
}
diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c
index 407be6a..94b546a 100644
--- a/arch/arm/mach-msm/msm_mpdecision.c
+++ b/arch/arm/mach-msm/msm_mpdecision.c
@@ -391,6 +391,8 @@
break;
}
msm_mpd.hpupdate = HPUPDATE_WAITING;
+ msm_dcvs_apply_gpu_floor(0);
+ msm_dcvs_update_algo_params();
}
return 0;
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 7829d8d..34fd8d2 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -395,7 +395,8 @@
pr_debug("ocmem: Disabled br clock\n");
}
-static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
+static struct ocmem_plat_data * __devinit parse_dt_config
+ (struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index 689e015..8b56775 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -304,6 +304,7 @@
for (i = 0; i < list->num_chunks; i++) {
if (!chunks[i].ddr_paddr ||
+ !IS_ALIGNED(chunks[i].ddr_paddr, MIN_CHUNK_SIZE) ||
chunks[i].size < MIN_CHUNK_SIZE ||
!IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) {
pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n",
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 6ae7544..6e8d127 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -303,6 +303,7 @@
return IRQ_HANDLED;
}
+ disable_irq_nosync(drv->irq);
drv->restart_inprogress = true;
restart_wcnss(drv);
@@ -324,7 +325,6 @@
pil_shutdown(&drv->desc);
flush_delayed_work(&drv->cancel_vote_work);
wcnss_flush_delayed_boot_votes();
- disable_irq_nosync(drv->irq);
return 0;
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 7652d74..07cbe19 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -423,7 +423,7 @@
struct mba_data *drv = subsys_to_drv(subsys);
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
/* MBA doesn't support shutdown */
pil_shutdown(&drv->q6->desc);
return 0;
@@ -435,7 +435,7 @@
int ret;
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
/*
* At this time, the modem is shutdown. Therefore this function cannot
* run concurrently with either the watchdog bite error handler or the
@@ -527,7 +527,7 @@
struct mba_data *drv = subsys_to_drv(desc);
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
ret = pil_boot(&drv->q6->desc);
if (ret)
@@ -729,8 +729,8 @@
return -ENOMEM;
platform_set_drvdata(pdev, drv);
- of_property_read_u32(pdev->dev.of_node, "qcom,is_loadable",
- &drv->is_loadable);
+ drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,is-loadable");
if (drv->is_loadable) {
ret = pil_mss_loadable_init(drv, pdev);
if (ret)
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 3b31b9f..0e75cae 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -121,10 +121,10 @@
write_pen_release(-1);
/* clear the IPC pending SPI */
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, cpu)) {
raise_clear_spi(cpu, false);
clear_pending_spi(cpu_data[cpu].ipc_irq);
- power_collapsed = 0;
+ per_cpu(power_collapsed, cpu) = 0;
}
/*
@@ -216,7 +216,7 @@
* GDFS which needs to be brought out by raising an SPI.
*/
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, cpu)) {
gic_configure_and_raise(cpu_data[cpu].ipc_irq, cpu);
raise_clear_spi(cpu, true);
} else {
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index faefe34..bd61feb 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/pm.h
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -27,7 +27,7 @@
#define msm_secondary_startup NULL
#endif
-extern int power_collapsed;
+DECLARE_PER_CPU(int, power_collapsed);
struct msm_pm_irq_calls {
unsigned int (*irq_pending)(void);
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index ae2a4bc..96c1218 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -3,7 +3,7 @@
* MSM Power Management Routines
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2012 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2012 The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -75,8 +75,9 @@
MSM_PM_DEBUG_HOTPLUG = BIT(7),
};
+DEFINE_PER_CPU(int, power_collapsed);
+
static int msm_pm_debug_mask;
-int power_collapsed;
module_param_named(
debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
@@ -565,7 +566,7 @@
__raw_writel(0, APPS_PWRDOWN);
mb();
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, 1)) {
/*
* enable the SCU while coming out of power
* collapse.
@@ -983,6 +984,7 @@
* path by reading the MPA5_GDFS_CNT_VAL register.
*/
if (cpu_is_msm8625()) {
+ int cpu;
/*
* on system reset, default value of MPA5_GDFS_CNT_VAL
* is = 0x0, later modem reprogram this value to
@@ -997,7 +999,11 @@
/* 8x25Q */
if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
if (val != 0x000F0002) {
- power_collapsed = 1;
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+ per_cpu(power_collapsed, cpu) = 1;
+ }
/*
* override DBGNOPOWERDN and program the GDFS
* count val
@@ -1008,7 +1014,11 @@
modem_early_exit = 1;
} else {
if (val != 0x00030002) {
- power_collapsed = 1;
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+ per_cpu(power_collapsed, cpu) = 1;
+ }
/*
* override DBGNOPOWERDN and program the GDFS
* count val
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index c426ff9..f0b83f9 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -16,8 +16,20 @@
#include <mach/irqs.h>
#include <mach/socinfo.h>
+/*
+ * If a GIC is present, then all IRQ's < 32 are PPI's and can only be
+ * requested and free'd using the percpu IRQ API.
+ * If a VIC is present, then only the traditional request, free API works.
+ *
+ * All MPCore's have GIC's. The Cortex A5 however may or may not be MPcore, but
+ * it still has a GIC. Except, the 7x27a, which is an A5 and yet has a VIC.
+ * So if the chip is A5 but does not have a GIC, default to the traditional
+ * IRQ {request, free}_irq API.
+ */
+
#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP) \
- || defined(CONFIG_ARCH_MSM8625)
+ || defined(CONFIG_ARCH_MSM8625) || \
+ (defined(CONFIG_ARCH_MSM_CORTEX_A5) && !defined(CONFIG_MSM_VIC))
static DEFINE_PER_CPU(u32, pmu_irq_cookie);
static void enable_irq_callback(void *info)
@@ -141,8 +153,10 @@
* handlers to call the percpu API.
* Defaults to unicore API {request,free}_irq().
* See arch/arm/kernel/perf_event.c
+ * See Comment above on the A5 and MSM_VIC.
*/
-#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP)
+#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP) \
+ || (defined(CONFIG_ARCH_MSM_CORTEX_A5) && !defined(CONFIG_MSM_VIC))
cpu_pmu_device.dev.platform_data = &multicore_data;
#endif
diff --git a/arch/arm/mach-msm/qdsp6v2/Makefile b/arch/arm/mach-msm/qdsp6v2/Makefile
index 66d6bda..08a6de6 100644
--- a/arch/arm/mach-msm/qdsp6v2/Makefile
+++ b/arch/arm/mach-msm/qdsp6v2/Makefile
@@ -20,10 +20,10 @@
obj-$(CONFIG_MSM_QDSP6_CODECS) += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o
obj-$(CONFIG_MSM_QDSP6_CODECS) += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_utils_aio.o
obj-$(CONFIG_MSM_QDSP6_CODECS) += rtac.o q6audio_v1.o q6audio_v1_aio.o
-obj-$(CONFIG_MSM_QDSP6_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_evrc.o audio_qcelp.o amrwb_in.o
+obj-$(CONFIG_MSM_QDSP6_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_amrwbplus.o audio_evrc.o audio_qcelp.o amrwb_in.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_utils_aio.o
obj-$(CONFIG_MSM_QDSP6V2_CODECS) += rtac_v2.o q6audio_v2.o q6audio_v2_aio.o
-obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_evrc.o audio_qcelp.o amrwb_in.o
+obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_evrc.o audio_qcelp.o amrwb_in.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
obj-$(CONFIG_MSM_ULTRASOUND_A) += ultrasound/version_a/
diff --git a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
index c28e403..02dbece 100644
--- a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
+++ b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <mach/subsystem_restart.h>
#include <mach/qdsp6v2/apr.h>
+#include <linux/of_device.h>
#define Q6_PIL_GET_DELAY_MS 100
@@ -30,25 +31,41 @@
{
struct adsp_loader_private *priv;
int rc = 0;
+ const char *adsp_dt = "qcom,adsp-state";
+ u32 adsp_state;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
-
- priv->pil_h = subsystem_get("adsp");
- if (IS_ERR(priv->pil_h)) {
- pr_err("%s: pil get adsp failed, error:%d\n", __func__, rc);
- devm_kfree(&pdev->dev, priv);
- goto fail;
+ rc = of_property_read_u32(pdev->dev.of_node, adsp_dt, &adsp_state);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: failed to read %s, rc = %d\n", __func__, adsp_dt, rc);
+ return rc;
}
- /* Query the DSP to check if resources are available */
- msleep(Q6_PIL_GET_DELAY_MS);
+ if (adsp_state == APR_SUBSYS_DOWN) {
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /* Set the state of the ADSP in APR driver */
- apr_set_q6_state(APR_SUBSYS_LOADED);
+ platform_set_drvdata(pdev, priv);
+
+ priv->pil_h = subsystem_get("adsp");
+ if (IS_ERR(priv->pil_h)) {
+ pr_err("%s: pil get adsp failed, error:%d\n",
+ __func__, rc);
+ devm_kfree(&pdev->dev, priv);
+ goto fail;
+ }
+
+ /* Query the DSP to check if resources are available */
+ msleep(Q6_PIL_GET_DELAY_MS);
+
+ /* Set the state of the ADSP in APR driver */
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ } else if (adsp_state == APR_SUBSYS_LOADED) {
+ dev_dbg(&pdev->dev,
+ "%s:MDM9x25 ADSP state = %x\n", __func__, adsp_state);
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ }
/* Query for MMPM API */
@@ -62,7 +79,8 @@
struct adsp_loader_private *priv;
priv = platform_get_drvdata(pdev);
- subsystem_put(priv->pil_h);
+ if (priv != NULL)
+ subsystem_put(priv->pil_h);
pr_info("%s: Q6/ADSP image is unloaded\n", __func__);
return 0;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index a24b9ec..cad845f 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -730,12 +730,19 @@
static int deregister_memory(void)
{
+ int i;
+
if (atomic64_read(&acdb_data.mem_len)) {
mutex_lock(&acdb_data.acdb_mutex);
+ atomic64_set(&acdb_data.mem_len, 0);
atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
atomic_set(&acdb_data.vocproc_total_cal_size, 0);
atomic_set(&acdb_data.vocvol_total_cal_size, 0);
- atomic64_set(&acdb_data.mem_len, 0);
+
+ for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
+ kfree(acdb_data.col_data[i]);
+ acdb_data.col_data[i] = NULL;
+ }
ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
ion_free(acdb_data.ion_client, acdb_data.ion_handle);
ion_client_destroy(acdb_data.ion_client);
@@ -747,12 +754,19 @@
static int register_memory(void)
{
int result;
+ int i;
unsigned long paddr;
void *kvptr;
unsigned long kvaddr;
unsigned long mem_len;
mutex_lock(&acdb_data.acdb_mutex);
+ for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
+ acdb_data.col_data[i] = kmalloc(MAX_COL_SIZE, GFP_KERNEL);
+ atomic_set(&acdb_data.vocproc_col_cal[i].cal_kvaddr,
+ (uint32_t)acdb_data.col_data[i]);
+ }
+
acdb_data.ion_client =
msm_ion_client_create(UINT_MAX, "audio_acdb_client");
if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
@@ -1029,7 +1043,6 @@
static int acdb_release(struct inode *inode, struct file *f)
{
- int i;
s32 result = 0;
atomic_dec(&usage_count);
@@ -1038,11 +1051,6 @@
pr_debug("%s: ref count %d!\n", __func__,
atomic_read(&usage_count));
- for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
- kfree(acdb_data.col_data[i]);
- acdb_data.col_data[i] = NULL;
- }
-
if (atomic_read(&usage_count) >= 1)
result = -EBUSY;
else
@@ -1067,16 +1075,10 @@
static int __init acdb_init(void)
{
- int i;
memset(&acdb_data, 0, sizeof(acdb_data));
mutex_init(&acdb_data.acdb_mutex);
atomic_set(&usage_count, 0);
- for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
- acdb_data.col_data[i] = kmalloc(MAX_COL_SIZE, GFP_KERNEL);
- atomic_set(&acdb_data.vocproc_col_cal[i].cal_kvaddr,
- (uint32_t)acdb_data.col_data[i]);
- }
return misc_register(&acdb_misc);
}
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c b/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
new file mode 100644
index 0000000..2889c14
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/audio_amrwbplus.c
@@ -0,0 +1,234 @@
+/* amr-wbplus audio output device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/msm_audio_amrwbplus.h>
+#include "audio_utils_aio.h"
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_amrwbplus_debug_fops = {
+ .read = audio_aio_debug_read,
+ .open = audio_aio_debug_open,
+};
+static void config_debug_fs(struct q6audio_aio *audio)
+{
+ if (audio != NULL) {
+ char name[sizeof("msm_amrwbplus_") + 5];
+ snprintf(name, sizeof(name), "msm_amrwbplus_%04x",
+ audio->ac->session);
+ audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ NULL, (void *)audio,
+ &audio_amrwbplus_debug_fops);
+ if (IS_ERR(audio->dentry))
+ pr_debug("debugfs_create_file failed\n");
+ }
+}
+#else
+static void config_debug_fs(struct q6audio_aio *audio)
+{
+}
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct asm_amrwbplus_cfg q6_amrwbplus_cfg;
+ struct msm_audio_amrwbplus_config_v2 *amrwbplus_drv_config;
+ struct q6audio_aio *audio = file->private_data;
+ int rc = 0;
+
+ switch (cmd) {
+ case AUDIO_START: {
+ pr_err("%s[%p]: AUDIO_START session_id[%d]\n", __func__,
+ audio, audio->ac->session);
+ if (audio->feedback == NON_TUNNEL_MODE) {
+ /* Configure PCM output block */
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
+ if (rc < 0) {
+ pr_err("pcm output block config failed\n");
+ break;
+ }
+ }
+ amrwbplus_drv_config =
+ (struct msm_audio_amrwbplus_config_v2 *)audio->codec_cfg;
+
+ q6_amrwbplus_cfg.size_bytes =
+ amrwbplus_drv_config->size_bytes;
+ q6_amrwbplus_cfg.version =
+ amrwbplus_drv_config->version;
+ q6_amrwbplus_cfg.num_channels =
+ amrwbplus_drv_config->num_channels;
+ q6_amrwbplus_cfg.amr_band_mode =
+ amrwbplus_drv_config->amr_band_mode;
+ q6_amrwbplus_cfg.amr_dtx_mode =
+ amrwbplus_drv_config->amr_dtx_mode;
+ q6_amrwbplus_cfg.amr_frame_fmt =
+ amrwbplus_drv_config->amr_frame_fmt;
+ q6_amrwbplus_cfg.amr_lsf_idx =
+ amrwbplus_drv_config->amr_lsf_idx;
+
+ rc = q6asm_media_format_block_amrwbplus(audio->ac,
+ &q6_amrwbplus_cfg);
+ if (rc < 0) {
+ pr_err("q6asm_media_format_block_amrwb+ failed...\n");
+ break;
+ }
+ rc = audio_aio_enable(audio);
+ audio->eos_rsp = 0;
+ audio->eos_flag = 0;
+ if (!rc) {
+ audio->enabled = 1;
+ } else {
+ audio->enabled = 0;
+ pr_err("Audio Start procedure failed rc=%d\n", rc);
+ break;
+ }
+ pr_debug("%s:AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+ audio->ac->session,
+ audio->enabled);
+ if (audio->stopped == 1)
+ audio->stopped = 0;
+ break;
+ }
+ case AUDIO_GET_AMRWBPLUS_CONFIG_V2: {
+ if ((audio) && (arg) && (audio->codec_cfg)) {
+ if (copy_to_user((void *)arg, audio->codec_cfg,
+ sizeof(struct msm_audio_amrwbplus_config_v2))) {
+ rc = -EFAULT;
+ pr_err("wb+ config get copy_to_user failed");
+ break;
+ }
+ } else {
+ pr_err("wb+ config v2 invalid parameters..");
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case AUDIO_SET_AMRWBPLUS_CONFIG_V2: {
+ if ((audio) && (arg) && (audio->codec_cfg)) {
+ if (copy_from_user(audio->codec_cfg, (void *)arg,
+ sizeof(struct msm_audio_amrwbplus_config_v2))) {
+ rc = -EFAULT;
+ pr_err("wb+ config set copy_from_user failed");
+ break;
+ }
+ } else {
+ pr_err("wb+ config invalid parameters..");
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ default:
+ pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio);
+ rc = audio->codec_ioctl(file, cmd, arg);
+ }
+ return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+ struct q6audio_aio *audio = NULL;
+ int rc = 0;
+
+ audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+ if (audio == NULL) {
+ pr_err("kzalloc failed for amrwb+ decode driver\n");
+ return -ENOMEM;
+ }
+ audio->codec_cfg =
+ kzalloc(sizeof(struct msm_audio_amrwbplus_config_v2), GFP_KERNEL);
+ if (audio->codec_cfg == NULL) {
+ pr_err("%s:failed kzalloc for amrwb+ config structure",
+ __func__);
+ kfree(audio);
+ return -ENOMEM;
+ }
+ audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+
+ audio->ac =
+ q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio);
+
+ if (!audio->ac) {
+ pr_err("Could not allocate memory for audio client\n");
+ kfree(audio->codec_cfg);
+ kfree(audio);
+ return -ENOMEM;
+ }
+
+ /* open in T/NT mode */
+ if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+ rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+ FORMAT_AMR_WB_PLUS);
+ if (rc < 0) {
+ pr_err("amrwbplus NT mode Open failed rc=%d\n", rc);
+ rc = -ENODEV;
+ goto fail;
+ }
+ audio->feedback = NON_TUNNEL_MODE;
+ audio->buf_cfg.frames_per_buf = 0x01;
+ audio->buf_cfg.meta_info_enable = 0x01;
+ } else if ((file->f_mode & FMODE_WRITE) &&
+ !(file->f_mode & FMODE_READ)) {
+ rc = q6asm_open_write(audio->ac, FORMAT_AMR_WB_PLUS);
+ if (rc < 0) {
+ pr_err("wb+ T mode Open failed rc=%d\n", rc);
+ rc = -ENODEV;
+ goto fail;
+ }
+ audio->feedback = TUNNEL_MODE;
+ audio->buf_cfg.meta_info_enable = 0x00;
+ } else {
+ pr_err("audio_amrwbplus Not supported mode\n");
+ rc = -EACCES;
+ goto fail;
+ }
+ rc = audio_aio_open(audio, file);
+
+ config_debug_fs(audio);
+ pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__,
+ audio->feedback,
+ audio->ac->session);
+ return 0;
+fail:
+ q6asm_audio_client_free(audio->ac);
+ kfree(audio->codec_cfg);
+ kfree(audio);
+ return rc;
+}
+
+static const struct file_operations audio_amrwbplus_fops = {
+ .owner = THIS_MODULE,
+ .open = audio_open,
+ .release = audio_aio_release,
+ .unlocked_ioctl = audio_ioctl,
+ .fsync = audio_aio_fsync,
+};
+
+struct miscdevice audio_amrwbplus_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "msm_amrwbplus",
+ .fops = &audio_amrwbplus_fops,
+};
+
+static int __init audio_amrwbplus_init(void)
+{
+ return misc_register(&audio_amrwbplus_misc);
+}
+
+device_initcall(audio_amrwbplus_init);
diff --git a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
index 2d0607c..409d796 100644
--- a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
+++ b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
@@ -24,6 +24,7 @@
#include <mach/qdsp6v2/rtac.h>
#include "q6audio_common.h"
#include <sound/q6afe-v2.h>
+#include <sound/apr_audio-v2.h>
#ifndef CONFIG_RTAC
@@ -45,10 +46,6 @@
#else
-#define VOICE_CMD_SET_PARAM 0x00011006
-#define VOICE_CMD_GET_PARAM 0x00011007
-#define VOICE_EVT_GET_PARAM_ACK 0x00011008
-
/* Max size of payload (buf size - apr header) */
#define MAX_PAYLOAD_SIZE 4076
#define RTAC_MAX_ACTIVE_DEVICES 4
@@ -353,7 +350,7 @@
return;
}
-static int get_voice_index(u32 cvs_handle)
+static int get_voice_index_cvs(u32 cvs_handle)
{
u32 i;
@@ -367,6 +364,32 @@
return 0;
}
+static int get_voice_index_cvp(u32 cvp_handle)
+{
+ u32 i;
+
+ for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+ if (rtac_voice_data.voice[i].cvp_handle == cvp_handle)
+ return i;
+ }
+
+ pr_err("%s: No voice index for CVP handle %d found returning 0\n",
+ __func__, cvp_handle);
+ return 0;
+}
+
+static int get_voice_index(u32 mode, u32 handle)
+{
+ if (mode == RTAC_CVP)
+ return get_voice_index_cvp(handle);
+ if (mode == RTAC_CVS)
+ return get_voice_index_cvs(handle);
+
+ pr_err("%s: Invalid mode %d, returning 0\n",
+ __func__, mode);
+ return 0;
+}
+
/* ADM APR */
void rtac_set_adm_handle(void *handle)
@@ -402,6 +425,7 @@
if (payload_size > rtac_adm_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_adm_user_buf_size, payload_size);
+ rtac_adm_payload_size = 0;
goto done;
}
memcpy(rtac_adm_buffer + sizeof(u32), payload, payload_size);
@@ -470,6 +494,7 @@
/* Set globals for copy of returned payload */
rtac_adm_user_buf_size = count;
+
/* Copy buffer to in-band payload */
if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
buf + 3 * sizeof(u32), payload_size)) {
@@ -572,6 +597,7 @@
if (payload_size > rtac_asm_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_asm_user_buf_size, payload_size);
+ rtac_asm_payload_size = 0;
goto done;
}
memcpy(rtac_asm_buffer + sizeof(u32), payload, payload_size);
@@ -619,6 +645,7 @@
__func__);
goto done;
}
+
if (session_id > (SESSION_MAX + 1)) {
pr_err("%s: Invalid Session = %d\n", __func__, session_id);
goto done;
@@ -739,6 +766,7 @@
if (payload_size > rtac_voice_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_voice_user_buf_size, payload_size);
+ rtac_voice_payload_size = 0;
goto done;
}
memcpy(rtac_voice_buffer + sizeof(u32), payload, payload_size);
@@ -753,7 +781,7 @@
u32 count = 0;
u32 bytes_returned = 0;
u32 payload_size;
- u16 dest_port;
+ u32 dest_port;
struct apr_hdr voice_params;
pr_debug("%s\n", __func__);
@@ -818,10 +846,10 @@
voice_params.src_svc = 0;
voice_params.src_domain = APR_DOMAIN_APPS;
voice_params.src_port = voice_session_id[
- get_voice_index(dest_port)];
+ get_voice_index(mode, dest_port)];
voice_params.dest_svc = 0;
voice_params.dest_domain = APR_DOMAIN_MODEM;
- voice_params.dest_port = dest_port;
+ voice_params.dest_port = (u16)dest_port;
voice_params.token = 0;
voice_params.opcode = opcode;
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
index f566e82..94192cf 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
@@ -16,15 +16,38 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/input/mt.h>
+#include <linux/syscalls.h>
#include "usfcdev.h"
+#define UNDEF_ID 0xffffffff
+#define SLOT_CMD_ID 0
+#define MAX_RETRIES 10
+
+
+
+enum usdev_event_status {
+ USFCDEV_EVENT_ENABLED,
+ USFCDEV_EVENT_DISABLING,
+ USFCDEV_EVENT_DISABLED,
+};
+
struct usfcdev_event {
bool (*match_cb)(uint16_t, struct input_dev *dev);
bool registered_event;
- bool filter;
+ bool interleaved;
+ enum usdev_event_status event_status;
};
static struct usfcdev_event s_usfcdev_events[MAX_EVENT_TYPE_NUM];
+struct usfcdev_input_command {
+ unsigned int type;
+ unsigned int code;
+ unsigned int value;
+};
+
+static long s_usf_pid;
+
static bool usfcdev_filter(struct input_handle *handle,
unsigned int type, unsigned int code, int value);
static bool usfcdev_match(struct input_handler *handler,
@@ -83,6 +106,22 @@
},
};
+static struct usfcdev_input_command initial_clear_cmds[] = {
+ {EV_ABS, ABS_PRESSURE, 0},
+ {EV_KEY, BTN_TOUCH, 0},
+};
+
+static struct usfcdev_input_command slot_clear_cmds[] = {
+ {EV_ABS, ABS_MT_SLOT, 0},
+ {EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
+};
+
+static struct usfcdev_input_command no_filter_cmds[] = {
+ {EV_ABS, ABS_MT_SLOT, 0},
+ {EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID},
+ {EV_SYN, SYN_REPORT, 0},
+};
+
static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev)
{
bool rc = false;
@@ -91,7 +130,7 @@
pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);
if (s_usfcdev_events[ind].registered_event &&
- s_usfcdev_events[ind].match_cb) {
+ s_usfcdev_events[ind].match_cb) {
rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc);
}
@@ -139,16 +178,39 @@
static bool usfcdev_filter(struct input_handle *handle,
unsigned int type, unsigned int code, int value)
{
+ uint16_t i = 0;
uint16_t ind = (uint16_t)handle->handler->minor;
+ bool rc = (s_usfcdev_events[ind].event_status != USFCDEV_EVENT_ENABLED);
- pr_debug("%s: event_type=%d; filter=%d; abs_xy=%ld; abs_y_mt[]=%ld\n",
- __func__,
- ind,
- s_usfcdev_events[ind].filter,
- usfc_tsc_ids[0].absbit[0],
- usfc_tsc_ids[1].absbit[1]);
+ if (s_usf_pid == sys_getpid()) {
+ /* Pass events from usfcdev driver */
+ rc = false;
+ pr_debug("%s: event_type=%d; type=%d; code=%d; val=%d",
+ __func__,
+ ind,
+ type,
+ code,
+ value);
+ } else if (s_usfcdev_events[ind].event_status ==
+ USFCDEV_EVENT_DISABLING) {
+ uint32_t u_value = value;
+ s_usfcdev_events[ind].interleaved = true;
+ /* Pass events for freeing slots from TSC driver */
+ for (i = 0; i < ARRAY_SIZE(no_filter_cmds); ++i) {
+ if ((no_filter_cmds[i].type == type) &&
+ (no_filter_cmds[i].code == code) &&
+ (no_filter_cmds[i].value <= u_value)) {
+ rc = false;
+ pr_debug("%s: no_filter_cmds[%d]; %d",
+ __func__,
+ i,
+ no_filter_cmds[i].value);
+ break;
+ }
+ }
+ }
- return s_usfcdev_events[ind].filter;
+ return rc;
}
bool usfcdev_register(
@@ -175,7 +237,7 @@
s_usfcdev_events[event_type_ind].registered_event = true;
s_usfcdev_events[event_type_ind].match_cb = match_cb;
- s_usfcdev_events[event_type_ind].filter = false;
+ s_usfcdev_events[event_type_ind].event_status = USFCDEV_EVENT_ENABLED;
ret = input_register_handler(&s_usfc_handlers[event_type_ind]);
if (!ret) {
rc = true;
@@ -209,7 +271,64 @@
event_type_ind);
s_usfcdev_events[event_type_ind].registered_event = false;
s_usfcdev_events[event_type_ind].match_cb = NULL;
- s_usfcdev_events[event_type_ind].filter = false;
+ s_usfcdev_events[event_type_ind].event_status =
+ USFCDEV_EVENT_ENABLED;
+
+ }
+}
+
+static inline void usfcdev_send_cmd(
+ struct input_dev *dev,
+ struct usfcdev_input_command cmd)
+{
+ input_event(dev, cmd.type, cmd.code, cmd.value);
+}
+
+static void usfcdev_clean_dev(uint16_t event_type_ind)
+{
+ struct input_dev *dev = NULL;
+ int i;
+ int j;
+ int retries = 0;
+
+ if (event_type_ind >= MAX_EVENT_TYPE_NUM) {
+ pr_err("%s: wrong input: event_type_ind=%d\n",
+ __func__,
+ event_type_ind);
+ return;
+ }
+
+ dev = s_usfc_handles[event_type_ind].dev;
+
+ for (i = 0; i < ARRAY_SIZE(initial_clear_cmds); i++)
+ usfcdev_send_cmd(dev, initial_clear_cmds[i]);
+ input_sync(dev);
+
+ /* Send commands to free all slots */
+ for (i = 0; i < dev->mtsize; i++) {
+ s_usfcdev_events[event_type_ind].interleaved = false;
+ if (input_mt_get_value(&(dev->mt[i]), ABS_MT_TRACKING_ID) < 0) {
+ pr_debug("%s: skipping slot %d",
+ __func__, i);
+ continue;
+ }
+ slot_clear_cmds[SLOT_CMD_ID].value = i;
+ for (j = 0; j < ARRAY_SIZE(slot_clear_cmds); j++)
+ usfcdev_send_cmd(dev, slot_clear_cmds[j]);
+
+ if (s_usfcdev_events[event_type_ind].interleaved) {
+ pr_debug("%s: interleaved(%d): slot(%d)",
+ __func__, i, dev->slot);
+ if (retries++ < MAX_RETRIES) {
+ --i;
+ continue;
+ }
+ pr_warning("%s: index(%d) reached max retires",
+ __func__, i);
+ }
+
+ retries = 0;
+ input_sync(dev);
}
}
@@ -225,12 +344,22 @@
}
if (s_usfcdev_events[event_type_ind].registered_event) {
- s_usfcdev_events[event_type_ind].filter = filter;
+
pr_debug("%s: event_type[%d]; filter=%d\n",
__func__,
event_type_ind,
filter
);
+ if (filter) {
+ s_usfcdev_events[event_type_ind].event_status =
+ USFCDEV_EVENT_DISABLING;
+ s_usf_pid = sys_getpid();
+ usfcdev_clean_dev(event_type_ind);
+ s_usfcdev_events[event_type_ind].event_status =
+ USFCDEV_EVENT_DISABLED;
+ } else
+ s_usfcdev_events[event_type_ind].event_status =
+ USFCDEV_EVENT_ENABLED;
} else {
pr_err("%s: event_type[%d] isn't registered\n",
__func__,
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index d1c61fe..bb33283 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -659,19 +659,6 @@
return uV;
}
-static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct rpm_regulator *reg = rdev_get_drvdata(rdev);
- int uV = 0;
-
- if (selector == 0)
- uV = reg->min_uV;
- else if (selector == 1)
- uV = reg->max_uV;
-
- return uV;
-}
-
static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
@@ -1030,7 +1017,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1043,7 +1029,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1056,7 +1041,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1069,7 +1053,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1089,7 +1072,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.enable_time = rpm_vreg_enable_time,
};
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 764fbeb..a59b338 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -152,6 +152,8 @@
LIST_HEAD(msm_rpm_ack_list);
+static DECLARE_COMPLETION(data_ready);
+
static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
struct msm_rpm_kvp_data *kvp)
{
@@ -226,9 +228,6 @@
memcpy(handle->kvp[i].value, data, size);
handle->kvp[i].valid = true;
- if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
- msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
-
return 0;
}
@@ -340,7 +339,7 @@
switch (event) {
case SMD_EVENT_DATA:
- queue_work(msm_rpm_smd_wq, &pdata->work);
+ complete(&data_ready);
break;
case SMD_EVENT_OPEN:
complete(&pdata->smd_open);
@@ -530,17 +529,19 @@
int errno;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
- if (!spin_trylock(&msm_rpm_data.smd_lock_read))
- return;
- while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
- if (msm_rpm_read_smd_data(buf)) {
- break;
+ while (1) {
+ wait_for_completion(&data_ready);
+
+ spin_lock(&msm_rpm_data.smd_lock_read);
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ if (msm_rpm_read_smd_data(buf))
+ break;
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
}
- msg_id = msm_rpm_get_msg_id_from_ack(buf);
- errno = msm_rpm_get_error_from_ack(buf);
- msm_rpm_process_ack(msg_id, errno);
+ spin_unlock(&msm_rpm_data.smd_lock_read);
}
- spin_unlock(&msm_rpm_data.smd_lock_read);
}
#define DEBUG_PRINT_BUFFER_SIZE 512
@@ -732,6 +733,11 @@
memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
tmpbuff += cdata->kvp[i].nbytes;
+
+ if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+ msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
+ &cdata->kvp[i]);
+
}
if (msm_rpm_debug_mask
@@ -892,6 +898,9 @@
msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+ complete(&data_ready);
return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
@@ -1013,6 +1022,7 @@
msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
if (!msm_rpm_smd_wq)
return -EINVAL;
+ queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
}
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/arch/arm/mach-msm/saw-regulator.c b/arch/arm/mach-msm/saw-regulator.c
index 6762648..0a81a33 100644
--- a/arch/arm/mach-msm/saw-regulator.c
+++ b/arch/arm/mach-msm/saw-regulator.c
@@ -54,11 +54,17 @@
struct regulator_dev *rdev;
char *name;
int uV;
+ int last_set_uV;
+ unsigned vlevel;
+ bool online;
};
/* Minimum core operating voltage */
#define MIN_CORE_VOLTAGE 950000
+/* Specifies an uninitialized voltage */
+#define INVALID_VOLTAGE -1
+
/* Specifies the PMIC internal slew rate in uV/us. */
#define REGULATOR_SLEW_RATE 1250
@@ -69,12 +75,32 @@
return vreg->uV;
}
+static int _set_voltage(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = msm_spm_set_vdd(rdev_get_id(rdev), vreg->vlevel);
+ if (!rc) {
+ if (vreg->uV > vreg->last_set_uV) {
+ /* Wait for voltage to stabalize. */
+ udelay((vreg->uV - vreg->last_set_uV) /
+ REGULATOR_SLEW_RATE);
+ }
+ vreg->last_set_uV = vreg->uV;
+ } else {
+ pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
+ vreg->uV = vreg->last_set_uV;
+ }
+
+ return rc;
+}
+
static int saw_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
unsigned *selector)
{
struct saw_vreg *vreg = rdev_get_drvdata(rdev);
int uV = min_uV;
- int rc;
u8 vprog, band;
if (uV < FTSMPS_BAND1_UV_MIN && max_uV >= FTSMPS_BAND1_UV_MIN)
@@ -119,23 +145,51 @@
return -EINVAL;
}
- rc = msm_spm_set_vdd(rdev_get_id(rdev), band | vprog);
- if (!rc) {
- if (uV > vreg->uV) {
- /* Wait for voltage to stabalize. */
- udelay((uV - vreg->uV) / REGULATOR_SLEW_RATE);
- }
- vreg->uV = uV;
- } else {
- pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
- }
+ vreg->vlevel = band | vprog;
+ vreg->uV = uV;
+
+ if (!vreg->online)
+ return 0;
+
+ return _set_voltage(rdev);
+}
+
+static int saw_enable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ if (vreg->uV != vreg->last_set_uV)
+ rc = _set_voltage(rdev);
+
+ if (!rc)
+ vreg->online = true;
return rc;
}
+static int saw_disable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ vreg->online = false;
+
+ return 0;
+}
+
+static int saw_is_enabled(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->online;
+}
+
static struct regulator_ops saw_ops = {
.get_voltage = saw_get_voltage,
.set_voltage = saw_set_voltage,
+ .enable = saw_enable,
+ .disable = saw_disable,
+ .is_enabled = saw_is_enabled,
};
static int __devinit saw_probe(struct platform_device *pdev)
@@ -168,12 +222,13 @@
goto free_vreg;
}
- vreg->desc.name = vreg->name;
- vreg->desc.id = pdev->id;
- vreg->desc.ops = &saw_ops;
- vreg->desc.type = REGULATOR_VOLTAGE;
- vreg->desc.owner = THIS_MODULE;
- vreg->uV = MIN_CORE_VOLTAGE;
+ vreg->desc.name = vreg->name;
+ vreg->desc.id = pdev->id;
+ vreg->desc.ops = &saw_ops;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->uV = INVALID_VOLTAGE;
+ vreg->last_set_uV = MIN_CORE_VOLTAGE;
vreg->rdev = regulator_register(&vreg->desc, &pdev->dev,
init_data, vreg, NULL);
@@ -233,5 +288,4 @@
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAW regulator driver");
-MODULE_VERSION("1.0");
MODULE_ALIAS("platform:saw-regulator");
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 6013efc..f4dae89 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -204,10 +204,13 @@
return ret;
}
-static u32 cacheline_size;
-
static void scm_inv_range(unsigned long start, unsigned long end)
{
+ u32 cacheline_size, ctr;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
start = round_down(start, cacheline_size);
end = round_up(end, cacheline_size);
outer_inv_range(start, end);
@@ -444,13 +447,3 @@
}
EXPORT_SYMBOL(scm_get_feat_version);
-static int scm_init(void)
-{
- u32 ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- return 0;
-}
-early_initcall(scm_init);
diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c
index 1bea82a..ff68d81 100644
--- a/arch/arm/mach-msm/smd_rpcrouter.c
+++ b/arch/arm/mach-msm/smd_rpcrouter.c
@@ -545,12 +545,13 @@
D("%s: registering device %x\n",
__func__, board_info->dev->prog);
list_del(&board_info->list);
+ spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
rc = platform_device_register(&board_info->dev->pdev);
if (rc)
pr_err("%s: board dev register failed %d\n",
__func__, rc);
kfree(board_info);
- break;
+ return;
}
}
spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 6cb9339..2743547 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -303,7 +303,13 @@
[154] = MSM_CPU_8930AB,
[155] = MSM_CPU_8930AB,
[156] = MSM_CPU_8930AB,
- [157] = MSM_CPU_8930AB
+ [157] = MSM_CPU_8930AB,
+
+ /* 8625Q IDs */
+ [168] = MSM_CPU_8625Q,
+ [169] = MSM_CPU_8625Q,
+ [170] = MSM_CPU_8625Q,
+
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 3af066d..212ad77 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -1012,6 +1012,19 @@
};
#endif /* CONFIG_LOCAL_TIMERS */
+#ifdef CONFIG_ARCH_MSM8625
+static void fixup_msm8625_timer(void)
+{
+ struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
+ struct msm_clock *gpt = &msm_clocks[MSM_CLOCK_GPT];
+ dgt->irq = MSM8625_INT_DEBUG_TIMER_EXP;
+ gpt->irq = MSM8625_INT_GP_TIMER_EXP;
+ global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+}
+#else
+static inline void fixup_msm8625_timer(void) { };
+#endif
+
static void __init msm_timer_init(void)
{
int i;
@@ -1032,11 +1045,8 @@
gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT
| MSM_CLOCK_FLAGS_ODD_MATCH_WRITE
| MSM_CLOCK_FLAGS_DELAYED_WRITE_POST;
- if (cpu_is_msm8625()) {
- dgt->irq = MSM8625_INT_DEBUG_TIMER_EXP;
- gpt->irq = MSM8625_INT_GP_TIMER_EXP;
- global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
- }
+ if (cpu_is_msm8625())
+ fixup_msm8625_timer();
} else if (cpu_is_qsd8x50()) {
dgt->freq = 4800000;
gpt->regbase = MSM_TMR_BASE;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e2cd0120..0ebc2b9 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -375,29 +375,61 @@
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
+unsigned long memory_hole_offset;
+EXPORT_SYMBOL(memory_hole_offset);
+unsigned long memory_hole_start;
+EXPORT_SYMBOL(memory_hole_start);
+unsigned long memory_hole_end;
+EXPORT_SYMBOL(memory_hole_end);
+
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-unsigned long membank0_size;
-EXPORT_SYMBOL(membank0_size);
-unsigned long membank1_start;
-EXPORT_SYMBOL(membank1_start);
-
-void __init find_membank0_hole(void)
+void find_memory_hole(void)
{
- sort(&meminfo.bank, meminfo.nr_banks,
- sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+ int i;
+ unsigned long hole_start;
+ unsigned long hole_size;
- membank0_size = meminfo.bank[0].size;
- membank1_start = meminfo.bank[1].start;
+ /*
+ * Find the start and end of the hole, using meminfo
+ * if it hasnt been found already.
+ */
+ if (memory_hole_start == 0 && memory_hole_end == 0) {
+ for (i = 0; i < (meminfo.nr_banks - 1); i++) {
+ if ((meminfo.bank[i].start + meminfo.bank[i].size) !=
+ meminfo.bank[i+1].start) {
+ if (meminfo.bank[i].start + meminfo.bank[i].size
+ <= MAX_HOLE_ADDRESS) {
+
+ hole_start = meminfo.bank[i].start +
+ meminfo.bank[i].size;
+ hole_size = meminfo.bank[i+1].start -
+ hole_start;
+
+ if (memory_hole_start == 0 &&
+ memory_hole_end == 0) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start +
+ hole_size;
+ } else if ((memory_hole_end -
+ memory_hole_start) <= hole_size) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start +
+ hole_size;
+ }
+ }
+ }
+ }
+ }
+ memory_hole_offset = memory_hole_start - PHYS_OFFSET;
}
+
#endif
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
-#ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-#endif
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1cb6cba..8575f78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -961,7 +961,7 @@
int i, j, highmem = 0;
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
- find_membank0_hole();
+ find_memory_hole();
#endif
#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 5fd98ea..5751d28 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -15,7 +15,7 @@
config IOSCHED_TEST
tristate "Test I/O scheduler"
depends on DEBUG_FS
- default m
+ default y
---help---
The test I/O scheduler is a duplicate of the noop scheduler with
addition of test utlity.
diff --git a/block/test-iosched.c b/block/test-iosched.c
index 52070ac..71e8669 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -663,7 +663,7 @@
test_name = ptd->test_info.get_test_case_str_fn(ptd);
else
test_name = "Unknown testcase";
- test_pr_info("%s: Starting test %s\n", __func__, test_name);
+ test_pr_info("%s: Starting test %s", __func__, test_name);
ret = prepare_test(ptd);
if (ret) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8994d6d..93b8ef1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1059,7 +1059,7 @@
dpm_wait_for_children(dev, async);
if (async_error)
- return 0;
+ goto Complete;
/*
* If a device configured to wake up the system from sleep states
@@ -1072,7 +1072,7 @@
if (pm_wakeup_pending()) {
async_error = -EBUSY;
- return 0;
+ goto Complete;
}
data.dev = dev;
@@ -1141,6 +1141,7 @@
del_timer_sync(&timer);
destroy_timer_on_stack(&timer);
+ Complete:
complete_all(&dev->power.completion);
if (error)
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index 8f8707f..91fcdfc 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -30,9 +30,9 @@
SDIO Transport Layer for DIAG Router
endmenu
-menu "HSIC support for DIAG"
+menu "HSIC/SMUX support for DIAG"
-config DIAG_BRIDGE_CODE
+config DIAGFWD_BRIDGE_CODE
depends on USB_QCOM_DIAG_BRIDGE
default y
bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
index 6ecc970..c9204ea 100644
--- a/drivers/char/diag/Makefile
+++ b/drivers/char/diag/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_DIAG_CHAR) := diagchar.o
obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_hsic.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_smux.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_hsic.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_smux.o
diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5cd5ce9..e78a2aa 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -134,33 +134,37 @@
pr_alert("diag: No matching PID for DCI data\n");
/* Using PID of client process, find client buffer */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (curr_client_pid == driver->dci_client_tbl[i].client->tgid) {
- /* copy pkt rsp in client buf */
- entry = &(driver->dci_client_tbl[i]);
- if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
- pr_alert("diag: create capacity for pkt rsp\n");
- entry->total_capacity += 8+write_len;
- temp_buf = krealloc(entry->dci_data,
- entry->total_capacity, GFP_KERNEL);
- if (!temp_buf) {
- pr_err("diag: DCI realloc failed\n");
- break;
- } else {
- entry->dci_data = temp_buf;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (curr_client_pid ==
+ driver->dci_client_tbl[i].client->tgid) {
+ /* copy pkt rsp in client buf */
+ entry = &(driver->dci_client_tbl[i]);
+ if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
+ pr_alert("diag: create capacity for pkt rsp\n");
+ entry->total_capacity += 8+write_len;
+ temp_buf = krealloc(entry->dci_data,
+ entry->total_capacity, GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err("diag: DCI realloc failed\n");
+ break;
+ } else {
+ entry->dci_data = temp_buf;
+ }
}
- }
- *(int *)(entry->dci_data+entry->data_len) =
+ *(int *)(entry->dci_data+entry->data_len) =
DCI_PKT_RSP_TYPE;
- entry->data_len += 4;
- *(int *)(entry->dci_data+entry->data_len) = write_len;
- entry->data_len += 4;
- memcpy(entry->dci_data+entry->data_len,
- buf+4+cmd_code_len, write_len);
- entry->data_len += write_len;
- /* delete immediate response entry */
- if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
- driver->req_tracking_tbl[index].pid = 0;
- break;
+ entry->data_len += 4;
+ *(int *)(entry->dci_data+entry->data_len)
+ = write_len;
+ entry->data_len += 4;
+ memcpy(entry->dci_data+entry->data_len,
+ buf+4+cmd_code_len, write_len);
+ entry->data_len += write_len;
+ /* delete immediate response entry */
+ if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
+ driver->req_tracking_tbl[index].pid = 0;
+ break;
+ }
}
}
}
@@ -408,6 +412,7 @@
int count, set_mask, num_codes, byte_index, bit_index, event_id;
uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
uint8_t *event_mask_ptr;
+ int offset = 0;
/* This is Pkt request/response transaction */
if (*(int *)temp > 0) {
@@ -463,10 +468,12 @@
} else if (*(int *)temp == DCI_LOG_TYPE) {
/* find client id and table */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client->tgid ==
- current->tgid) {
- found = 1;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (driver->dci_client_tbl[i].client->tgid ==
+ current->tgid) {
+ found = 1;
+ break;
+ }
}
}
if (!found) {
@@ -495,6 +502,7 @@
*/
log_mask_ptr = head_log_mask_ptr;
found = 0;
+ offset = 0;
while (log_mask_ptr) {
if (*log_mask_ptr == equip_id) {
found = 1;
@@ -505,6 +513,7 @@
pr_debug("diag: did not find equip id = %x at %p\n",
equip_id, log_mask_ptr);
log_mask_ptr += 514;
+ offset += 514;
}
}
if (!found) {
@@ -517,21 +526,25 @@
*log_mask_ptr |= byte_mask;
else
*log_mask_ptr &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_log_mask(
+ offset, byte_index,
+ byte_mask);
temp += 2;
count++;
ret = DIAG_DCI_NO_ERROR;
}
- /* add to cumulative mask */
- update_dci_cumulative_log_mask(i);
/* send updated mask to peripherals */
diag_send_dci_log_mask(driver->ch_cntl);
} else if (*(int *)temp == DCI_EVENT_TYPE) {
/* find client id and table */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client->tgid ==
- current->tgid) {
- found = 1;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (driver->dci_client_tbl[i].client->tgid ==
+ current->tgid) {
+ found = 1;
+ break;
+ }
}
}
if (!found) {
@@ -561,12 +574,12 @@
*(event_mask_ptr + byte_index) |= byte_mask;
else
*(event_mask_ptr + byte_index) &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_event_mask(byte_index, byte_mask);
temp += sizeof(int);
count++;
ret = DIAG_DCI_NO_ERROR;
}
- /* add to cumulative mask */
- update_dci_cumulative_event_mask(i);
/* send updated mask to peripherals */
diag_send_dci_event_mask(driver->ch_cntl);
} else {
@@ -575,16 +588,29 @@
return ret;
}
-void update_dci_cumulative_event_mask(int client_index)
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
{
int i;
- uint8_t *update_ptr = dci_cumulative_event_mask;
uint8_t *event_mask_ptr;
+ uint8_t *update_ptr = dci_cumulative_event_mask;
+ bool is_set = false;
mutex_lock(&dci_event_mask_mutex);
- event_mask_ptr = driver->dci_client_tbl[client_index].dci_event_mask;
- for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
- *(update_ptr+i) |= *(event_mask_ptr+i);
+ update_ptr += offset;
+ for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ event_mask_ptr =
+ driver->dci_client_tbl[i].dci_event_mask;
+ event_mask_ptr += offset;
+ if ((*event_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the event mask set */
+ break;
+ }
+ }
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
mutex_unlock(&dci_event_mask_mutex);
}
@@ -624,27 +650,39 @@
mutex_unlock(&driver->diag_cntl_mutex);
}
-void update_dci_cumulative_log_mask(int client_index)
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+ uint8_t byte_mask)
{
- int i, j;
+ int i;
uint8_t *update_ptr = dci_cumulative_log_mask;
- uint8_t *log_mask_ptr =
- driver->dci_client_tbl[client_index].dci_log_mask;
+ uint8_t *log_mask_ptr;
+ bool is_set = false;
mutex_lock(&dci_log_mask_mutex);
- *update_ptr = 0; /* add first equip id */
- /* skip the first equip id */
- update_ptr++; log_mask_ptr++;
- for (i = 0; i < 16; i++) {
- for (j = 0; j < 513; j++) {
- *update_ptr |= *log_mask_ptr;
- update_ptr++;
- log_mask_ptr++;
+ *update_ptr = 0;
+ /* set the equipment IDs */
+ for (i = 0; i < 16; i++)
+ *(update_ptr + (i*514)) = i;
+
+ update_ptr += offset;
+ /* update the dirty bit */
+ *(update_ptr+1) = 1;
+ update_ptr = update_ptr + byte_index;
+ for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ log_mask_ptr =
+ (driver->dci_client_tbl[i].dci_log_mask);
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ if ((*log_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the log mask set */
+ break;
}
- *update_ptr = i+1;
- update_ptr++;
- log_mask_ptr++;
}
+
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
mutex_unlock(&dci_log_mask_mutex);
}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index afcabcc..435c750 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -85,11 +85,12 @@
void extract_dci_pkt_rsp(unsigned char *buf);
/* DCI Log streaming functions */
void create_dci_log_mask_tbl(unsigned char *tbl_buf);
-void update_dci_cumulative_log_mask(int client_index);
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+ uint8_t byte_mask);
void diag_send_dci_log_mask(smd_channel_t *ch);
void extract_dci_log(unsigned char *buf);
/* DCI event streaming functions */
-void update_dci_cumulative_event_mask(int client_index);
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask);
void diag_send_dci_event_mask(smd_channel_t *ch);
void extract_dci_events(unsigned char *buf);
void create_dci_event_mask_tbl(unsigned char *tbl_buf);
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index ed0f08e..7863f74 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include "diagchar.h"
#include "diagfwd.h"
+#include "diagfwd_bridge.h"
#define DEBUG_BUF_SIZE 4096
static struct dentry *diag_dbgfs_dent;
@@ -195,8 +196,8 @@
return ret;
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-static ssize_t diag_dbgfs_read_hsic(struct file *file, char __user *ubuf,
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char *buf;
@@ -220,13 +221,17 @@
"count_hsic_write_pool: %d\n"
"diag_hsic_pool: %x\n"
"diag_hsic_write_pool: %x\n"
- "write_len_mdm: %d\n"
+ "HSIC write_len: %d\n"
"num_hsic_buf_tbl_entries: %d\n"
- "usb_mdm_connected: %d\n"
- "diag_read_mdm_work: %d\n"
+ "HSIC usb_connected: %d\n"
+ "HSIC diag_read_work: %d\n"
"diag_read_hsic_work: %d\n"
"diag_disconnect_work: %d\n"
- "diag_usb_read_complete_work: %d\n",
+ "diag_usb_read_complete_work: %d\n"
+ "smux ch: %d"
+ "smux enabled %d"
+ "smux in busy %d"
+ "smux connected %d",
driver->hsic_ch,
driver->hsic_inited,
driver->hsic_device_enabled,
@@ -238,13 +243,17 @@
driver->count_hsic_write_pool,
(unsigned int)driver->diag_hsic_pool,
(unsigned int)driver->diag_hsic_write_pool,
- driver->write_len_mdm,
+ diag_bridge[HSIC].write_len,
driver->num_hsic_buf_tbl_entries,
- driver->usb_mdm_connected,
- work_pending(&(driver->diag_read_mdm_work)),
+ diag_bridge[HSIC].usb_connected,
+ work_pending(&(diag_bridge[HSIC].diag_read_work)),
work_pending(&(driver->diag_read_hsic_work)),
work_pending(&(driver->diag_disconnect_work)),
- work_pending(&(driver->diag_usb_read_complete_work)));
+ work_pending(&(diag_bridge[HSIC].usb_read_complete_work)),
+ driver->lcid,
+ driver->diag_smux_enabled,
+ driver->in_busy_smux,
+ driver->smux_connected);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
@@ -252,8 +261,8 @@
return ret;
}
-const struct file_operations diag_dbgfs_hsic_ops = {
- .read = diag_dbgfs_read_hsic,
+const struct file_operations diag_dbgfs_bridge_ops = {
+ .read = diag_dbgfs_read_bridge,
};
#endif
@@ -284,9 +293,9 @@
debugfs_create_file("work_pending", 0444, diag_dbgfs_dent, 0,
&diag_dbgfs_workpending_ops);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
- debugfs_create_file("hsic", 0444, diag_dbgfs_dent, 0,
- &diag_dbgfs_hsic_ops);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_bridge_ops);
#endif
diag_dbgfs_table_index = 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index de3cf52..d1ec5f2 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -273,7 +273,9 @@
struct diag_request *usb_read_mdm_ptr;
struct diag_request *write_ptr_mdm;
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ /* common for all bridges */
+ struct work_struct diag_disconnect_work;
/* SGLTE variables */
int lcid;
unsigned char *buf_in_smux;
@@ -290,18 +292,6 @@
int in_busy_hsic_read_on_device;
int in_busy_hsic_write;
struct work_struct diag_read_hsic_work;
- struct mutex bridge_mutex;
- /* USB MDM channel variables */
- int usb_mdm_connected;
- int read_len_mdm;
- int write_len_mdm;
- unsigned char *usb_buf_mdm_out;
- struct usb_diag_ch *mdm_ch;
- struct workqueue_struct *diag_bridge_wq;
- struct work_struct diag_read_mdm_work;
- struct work_struct diag_disconnect_work;
- struct work_struct diag_usb_read_complete_work;
- struct diag_request *usb_read_mdm_ptr;
int count_hsic_pool;
int count_hsic_write_pool;
unsigned int poolsize_hsic;
@@ -316,5 +306,6 @@
#endif
};
+extern struct diag_bridge_dev *diag_bridge;
extern struct diagchar_dev *driver;
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 7b17ce4..645d916 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -32,13 +32,14 @@
#ifdef CONFIG_DIAG_SDIO_PIPE
#include "diagfwd_sdio.h"
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
#include "diagfwd_hsic.h"
#include "diagfwd_smux.h"
#endif
#include <linux/timer.h>
#include "diag_debugfs.h"
#include "diag_masks.h"
+#include "diagfwd_bridge.h"
MODULE_DESCRIPTION("Diag Char Driver");
MODULE_LICENSE("GPL v2");
@@ -127,7 +128,7 @@
mutex_unlock(&driver->diagchar_mutex);
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diag_clear_hsic_tbl(void)
{
int i;
@@ -278,7 +279,7 @@
if (driver->logging_process_id == current->tgid) {
driver->logging_mode = USB_MODE;
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diag_clear_hsic_tbl();
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
@@ -708,7 +709,7 @@
#ifdef CONFIG_DIAG_SDIO_PIPE
driver->in_busy_sdio = 1;
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_disconnect_bridge(0);
diag_clear_hsic_tbl();
#endif
@@ -737,7 +738,7 @@
queue_work(driver->diag_sdio_wq,
&(driver->diag_read_sdio_work));
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_connect_bridge(0);
#endif
}
@@ -745,13 +746,13 @@
else if (temp == USB_MODE && driver->logging_mode
== NO_LOGGING_MODE) {
diagfwd_disconnect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_disconnect_bridge(0);
#endif
} else if (temp == NO_LOGGING_MODE && driver->logging_mode
== USB_MODE) {
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_connect_bridge(0);
#endif
} else if (temp == USB_MODE && driver->logging_mode
@@ -781,14 +782,14 @@
queue_work(driver->diag_sdio_wq,
&(driver->diag_read_sdio_work));
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
#endif
} else if (temp == MEMORY_DEVICE_MODE &&
driver->logging_mode == USB_MODE) {
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diag_clear_hsic_tbl();
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
@@ -814,7 +815,7 @@
struct diag_dci_client_tbl *entry;
int index = -1, i = 0, ret = 0;
int num_data = 0, data_type;
-#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAG_BRIDGE_CODE)
+#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAGFWD_BRIDGE_CODE)
int mdm_token = MDM_TOKEN;
#endif
@@ -833,7 +834,7 @@
if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) && (driver->
logging_mode == MEMORY_DEVICE_MODE)) {
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
unsigned long spin_lock_flags;
struct diag_write_device hsic_buf_tbl[NUM_HSIC_BUF_TBL_ENTRIES];
#endif
@@ -969,7 +970,7 @@
driver->in_busy_sdio = 0;
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
spin_lock_irqsave(&driver->hsic_spinlock, spin_lock_flags);
for (i = 0; i < driver->poolsize_hsic_write; i++) {
hsic_buf_tbl[i].buf = driver->hsic_buf_tbl[i].buf;
@@ -1120,14 +1121,17 @@
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
/* check the current client and copy its data */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- entry = &(driver->dci_client_tbl[i]);
- if (entry && (current->tgid == entry->client->tgid)) {
- COPY_USER_SPACE_OR_EXIT(buf+4,
- entry->data_len, 4);
- COPY_USER_SPACE_OR_EXIT(buf+8,
- *(entry->dci_data), entry->data_len);
- entry->data_len = 0;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ entry = &(driver->dci_client_tbl[i]);
+ if (entry && (current->tgid ==
+ entry->client->tgid)) {
+ COPY_USER_SPACE_OR_EXIT(buf+4,
+ entry->data_len, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+8,
+ *(entry->dci_data), entry->data_len);
+ entry->data_len = 0;
+ break;
+ }
}
}
driver->data_ready[index] ^= DCI_DATA_TYPE;
@@ -1199,7 +1203,8 @@
/* Check masks for On-Device logging */
if (driver->mask_check) {
- if (!mask_request_validate(driver->user_space_data)) {
+ if (!mask_request_validate(driver->user_space_data +
+ token_offset)) {
pr_alert("diag: mask request Invalid\n");
return -EFAULT;
}
@@ -1224,7 +1229,7 @@
}
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/* send masks to 9k too */
if (driver->hsic_ch && (payload_size > 0) && remote_data) {
/* wait sending mask updates if HSIC ch not ready */
@@ -1530,6 +1535,13 @@
return 0;
}
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_disconnect_work_fn(struct work_struct *w)
+{
+ diagfwd_disconnect_bridge(1);
+}
+#endif
+
#ifdef CONFIG_DIAG_SDIO_PIPE
void diag_sdio_fn(int type)
{
@@ -1544,16 +1556,14 @@
inline void diag_sdio_fn(int type) {}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-void diag_bridge_fn(int type)
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+void diagfwd_bridge_fn(int type)
{
- if (type == INIT)
- diagfwd_bridge_init();
- else if (type == EXIT)
+ if (type == EXIT)
diagfwd_bridge_exit();
}
#else
-inline void diag_bridge_fn(int type) {}
+inline void diagfwd_bridge_fn(int type) { }
#endif
static int __init diagchar_init(void)
@@ -1563,6 +1573,12 @@
pr_debug("diagfwd initializing ..\n");
driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ diag_bridge = kzalloc(MAX_BRIDGES * sizeof(struct diag_bridge_dev),
+ GFP_KERNEL);
+ if (!diag_bridge)
+ pr_warning("diag: could not allocate memory for bridge\n");
+#endif
if (driver) {
driver->used = 0;
@@ -1607,10 +1623,16 @@
diag_debugfs_init();
diag_masks_init();
diagfwd_init();
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ diagfwd_bridge_init(HSIC);
+ diagfwd_bridge_init(SMUX);
+ INIT_WORK(&(driver->diag_disconnect_work),
+ diag_disconnect_work_fn);
+#endif
diagfwd_cntl_init();
driver->dci_state = diag_dci_init();
diag_sdio_fn(INIT);
- diag_bridge_fn(INIT);
+
pr_debug("diagchar initializing ..\n");
driver->num = 1;
driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
@@ -1645,7 +1667,7 @@
diagfwd_cntl_exit();
diag_masks_exit();
diag_sdio_fn(EXIT);
- diag_bridge_fn(EXIT);
+ diagfwd_bridge_fn(EXIT);
return -1;
}
@@ -1659,7 +1681,7 @@
diagfwd_cntl_exit();
diag_masks_exit();
diag_sdio_fn(EXIT);
- diag_bridge_fn(EXIT);
+ diagfwd_bridge_fn(EXIT);
diag_debugfs_cleanup();
diagchar_cleanup();
printk(KERN_INFO "done diagchar exit\n");
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 978b63b..cee4c96 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -40,6 +40,7 @@
#endif
#include "diag_dci.h"
#include "diag_masks.h"
+#include "diagfwd_bridge.h"
#define MODE_CMD 41
#define RESET_ID 2
@@ -327,7 +328,7 @@
}
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
unsigned long flags;
int foundIndex = -1;
@@ -337,7 +338,7 @@
if (driver->hsic_buf_tbl[i].length == 0) {
driver->hsic_buf_tbl[i].buf = buf;
driver->hsic_buf_tbl[i].length =
- driver->write_len_mdm;
+ diag_bridge[HSIC].write_len;
driver->num_hsic_buf_tbl_entries++;
foundIndex = i;
break;
@@ -349,7 +350,7 @@
else
pr_debug("diag: ENQUEUE HSIC buf ptr and length is %x , %d\n",
(unsigned int)buf,
- driver->write_len_mdm);
+ diag_bridge[HSIC].write_len);
}
#endif
for (i = 0; i < driver->num_clients; i++)
@@ -386,10 +387,10 @@
&(driver->diag_read_sdio_work));
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
if (driver->hsic_ch)
- queue_work(driver->diag_bridge_wq,
+ queue_work(diag_bridge[HSIC].wq,
&(driver->diag_read_hsic_work));
}
#endif
@@ -436,7 +437,7 @@
"while USB write\n");
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
if (driver->hsic_device_enabled) {
struct diag_request *write_ptr_mdm;
@@ -447,9 +448,10 @@
if (write_ptr_mdm) {
write_ptr_mdm->buf = buf;
write_ptr_mdm->length =
- driver->write_len_mdm;
- err = usb_diag_write(driver->mdm_ch,
- write_ptr_mdm);
+ diag_bridge[HSIC].write_len;
+ write_ptr_mdm->context = (void *)HSIC;
+ err = usb_diag_write(
+ diag_bridge[HSIC].ch, write_ptr_mdm);
/* Return to the pool immediately */
if (err) {
diagmem_free(driver,
@@ -463,14 +465,16 @@
err = -1;
}
} else {
- pr_err("diag: Incorrect hsic data "
+ pr_err("diag: Incorrect HSIC data "
"while USB write\n");
err = -1;
}
} else if (proc_num == SMUX_DATA) {
write_ptr->buf = buf;
+ write_ptr->context = (void *)SMUX;
pr_debug("diag: writing SMUX data\n");
- err = usb_diag_write(driver->mdm_ch, write_ptr);
+ err = usb_diag_write(diag_bridge[SMUX].ch,
+ write_ptr);
}
#endif
APPEND_DEBUG('d');
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 0000000..75fdeb4
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,355 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diagmem.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+
+struct diag_bridge_dev *diag_bridge;
+
+/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
+int diagfwd_connect_bridge(int process_cable)
+{
+ int i;
+
+ pr_debug("diag: in %s\n", __func__);
+
+ for (i = 0; i < MAX_BRIDGES; i++)
+ if (diag_bridge[i].enabled)
+ connect_bridge(process_cable, i);
+ return 0;
+}
+
+void connect_bridge(int process_cable, int index)
+{
+ int err;
+
+ mutex_lock(&diag_bridge[index].bridge_mutex);
+ /* If the usb cable is being connected */
+ if (process_cable) {
+ err = usb_diag_alloc_req(diag_bridge[index].ch, N_MDM_WRITE,
+ N_MDM_READ);
+ if (err)
+ pr_err("diag: unable to alloc USB req on mdm ch err:%d\n",
+ err);
+
+ diag_bridge[index].usb_connected = 1;
+ }
+
+ if (index == SMUX && driver->diag_smux_enabled) {
+ driver->in_busy_smux = 0;
+ diagfwd_connect_smux();
+ } else if (index == HSIC && driver->hsic_device_enabled) {
+ driver->in_busy_hsic_read_on_device = 0;
+ driver->in_busy_hsic_write = 0;
+ /* If the HSIC (diag_bridge) platform device is not open */
+ if (!driver->hsic_device_opened) {
+ err = diag_bridge_open(&hsic_diag_bridge_ops);
+ if (err) {
+ pr_err("diag: HSIC channel open error: %d\n",
+ err);
+ } else {
+ pr_debug("diag: opened HSIC channel\n");
+ driver->hsic_device_opened = 1;
+ }
+ } else {
+ pr_debug("diag: HSIC channel already open\n");
+ }
+ /*
+ * Turn on communication over usb mdm and HSIC, if the HSIC
+ * device driver is enabled and opened
+ */
+ if (driver->hsic_device_opened) {
+ driver->hsic_ch = 1;
+ /* Poll USB mdm channel to check for data */
+ if (driver->logging_mode == USB_MODE)
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
+ /* Poll HSIC channel to check for data */
+ queue_work(diag_bridge[HSIC].wq,
+ &driver->diag_read_hsic_work);
+ }
+ }
+ mutex_unlock(&diag_bridge[index].bridge_mutex);
+}
+
+/*
+ * diagfwd_disconnect_bridge is called when the USB mdm channel
+ * is disconnected. So disconnect should happen for all bridges
+ */
+int diagfwd_disconnect_bridge(int process_cable)
+{
+ int i;
+ pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
+
+ for (i = 0; i < MAX_BRIDGES; i++) {
+ if (diag_bridge[i].enabled) {
+ mutex_lock(&diag_bridge[i].bridge_mutex);
+ /* If the usb cable is being disconnected */
+ if (process_cable) {
+ diag_bridge[i].usb_connected = 0;
+ usb_diag_free_req(diag_bridge[i].ch);
+ }
+
+ if (i == HSIC && driver->hsic_device_enabled &&
+ driver->logging_mode != MEMORY_DEVICE_MODE) {
+ driver->in_busy_hsic_read_on_device = 1;
+ driver->in_busy_hsic_write = 1;
+ /* Turn off communication over usb and HSIC */
+ diag_hsic_close();
+ } else if (i == SMUX && driver->diag_smux_enabled &&
+ driver->logging_mode == USB_MODE) {
+ driver->in_busy_smux = 1;
+ driver->lcid = LCID_INVALID;
+ driver->smux_connected = 0;
+ /* Turn off communication over usb and smux */
+ msm_smux_close(LCID_VALID);
+ }
+ mutex_unlock(&diag_bridge[i].bridge_mutex);
+ }
+ }
+ return 0;
+}
+
+/* Called after the asynchronous usb_diag_read() on mdm channel is complete */
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+{
+ int index = (int)(diag_read_ptr->context);
+
+	/* The read of the usb driver on the mdm (not HSIC/SMUX) has completed */
+ diag_bridge[index].read_len = diag_read_ptr->actual;
+
+ if (index == SMUX) {
+ if (driver->diag_smux_enabled) {
+ diagfwd_read_complete_smux();
+ return 0;
+ } else {
+ pr_warning("diag: incorrect callback for smux\n");
+ }
+ }
+
+ /* If SMUX not enabled, check for HSIC */
+ driver->in_busy_hsic_read_on_device = 0;
+ if (!driver->hsic_ch) {
+ pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
+ return 0;
+ }
+
+ /*
+ * The read of the usb driver on the mdm channel has completed.
+ * If there is no write on the HSIC in progress, check if the
+ * read has data to pass on to the HSIC. If so, pass the usb
+ * mdm data on to the HSIC.
+ */
+ if (!driver->in_busy_hsic_write && diag_bridge[HSIC].usb_buf_out &&
+ (diag_bridge[HSIC].read_len > 0)) {
+
+ /*
+ * Initiate the HSIC write. The HSIC write is
+ * asynchronous. When complete the write
+ * complete callback function will be called
+ */
+ int err;
+ driver->in_busy_hsic_write = 1;
+ err = diag_bridge_write(diag_bridge[HSIC].usb_buf_out,
+ diag_bridge[HSIC].read_len);
+ if (err) {
+ pr_err_ratelimited("diag: mdm data on HSIC write err: %d\n",
+ err);
+ /*
+ * If the error is recoverable, then clear
+ * the write flag, so we will resubmit a
+ * write on the next frame. Otherwise, don't
+ * resubmit a write on the next frame.
+ */
+ if ((-ENODEV) != err)
+ driver->in_busy_hsic_write = 0;
+ }
+ }
+
+ /*
+ * If there is no write of the usb mdm data on the
+ * HSIC channel
+ */
+ if (!driver->in_busy_hsic_write)
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
+
+ return 0;
+}
+
+static void diagfwd_bridge_notifier(void *priv, unsigned event,
+ struct diag_request *d_req)
+{
+ int index;
+
+ switch (event) {
+ case USB_DIAG_CONNECT:
+ diagfwd_connect_bridge(1);
+ break;
+ case USB_DIAG_DISCONNECT:
+ queue_work(driver->diag_wq,
+ &driver->diag_disconnect_work);
+ break;
+ case USB_DIAG_READ_DONE:
+ index = (int)(d_req->context);
+ queue_work(diag_bridge[index].wq,
+ &diag_bridge[index].usb_read_complete_work);
+ break;
+ case USB_DIAG_WRITE_DONE:
+ index = (int)(d_req->context);
+ if (index == HSIC && driver->hsic_device_enabled)
+ diagfwd_write_complete_hsic(d_req);
+ else if (index == SMUX && driver->diag_smux_enabled)
+ diagfwd_write_complete_smux();
+ break;
+ default:
+ pr_err("diag: in %s: Unknown event from USB diag:%u\n",
+ __func__, event);
+ break;
+ }
+}
+
+void diagfwd_bridge_init(int index)
+{
+ int ret;
+ unsigned char name[20];
+
+ if (index == HSIC)
+ strlcpy(name, "hsic", sizeof(name));
+ else
+ strlcpy(name, "smux", sizeof(name));
+
+ strlcpy(diag_bridge[index].name, name, sizeof(diag_bridge[index].name));
+ strlcat(name, "_diag_wq", sizeof(diag_bridge[index].name));
+ diag_bridge[index].enabled = 1;
+ diag_bridge[index].wq = create_singlethread_workqueue(name);
+ diag_bridge[index].read_len = 0;
+ diag_bridge[index].write_len = 0;
+ if (diag_bridge[index].usb_buf_out == NULL)
+ diag_bridge[index].usb_buf_out =
+ kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+ if (diag_bridge[index].usb_buf_out == NULL)
+ goto err;
+ if (diag_bridge[index].usb_read_ptr == NULL)
+ diag_bridge[index].usb_read_ptr =
+ kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+ if (diag_bridge[index].usb_read_ptr == NULL)
+ goto err;
+ if (diag_bridge[index].usb_read_ptr->context == NULL)
+ diag_bridge[index].usb_read_ptr->context =
+ kzalloc(sizeof(int), GFP_KERNEL);
+ if (diag_bridge[index].usb_read_ptr->context == NULL)
+ goto err;
+ mutex_init(&diag_bridge[index].bridge_mutex);
+
+ if (index == HSIC) {
+ INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+ diag_usb_read_complete_hsic_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+ INIT_WORK(&(diag_bridge[index].diag_read_work),
+ diag_read_usb_hsic_work_fn);
+ diag_bridge[index].ch = usb_diag_open(DIAG_MDM, (void *)index,
+ diagfwd_bridge_notifier);
+ if (IS_ERR(diag_bridge[index].ch)) {
+ pr_err("diag: Unable to open USB diag MDM channel\n");
+ goto err;
+ }
+#endif
+ /* register HSIC device */
+ ret = platform_driver_register(&msm_hsic_ch_driver);
+ if (ret)
+ pr_err("diag: could not register HSIC device, ret: %d\n",
+ ret);
+ } else if (index == SMUX) {
+ INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+ diag_usb_read_complete_smux_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+ INIT_WORK(&(diag_bridge[index].diag_read_work),
+ diag_read_usb_smux_work_fn);
+ diag_bridge[index].ch = usb_diag_open(DIAG_QSC, (void *)index,
+ diagfwd_bridge_notifier);
+ if (IS_ERR(diag_bridge[index].ch)) {
+ pr_err("diag: Unable to open USB diag QSC channel\n");
+ goto err;
+ }
+#endif
+ ret = platform_driver_register(&msm_diagfwd_smux_driver);
+ if (ret)
+ pr_err("diag: could not register SMUX device, ret: %d\n",
+ ret);
+ }
+ return;
+err:
+ pr_err("diag: Could not initialize for bridge forwarding\n");
+ kfree(diag_bridge[index].usb_buf_out);
+ kfree(driver->hsic_buf_tbl);
+ kfree(driver->write_ptr_mdm);
+ kfree(diag_bridge[index].usb_read_ptr);
+ if (diag_bridge[index].wq)
+ destroy_workqueue(diag_bridge[index].wq);
+ return;
+}
+
+void diagfwd_bridge_exit(void)
+{
+ int i;
+ pr_debug("diag: in %s\n", __func__);
+
+ if (driver->hsic_device_enabled) {
+ diag_hsic_close();
+ driver->hsic_device_enabled = 0;
+ diag_bridge[HSIC].enabled = 0;
+ }
+ driver->hsic_inited = 0;
+ diagmem_exit(driver, POOL_TYPE_ALL);
+ if (driver->diag_smux_enabled) {
+ driver->lcid = LCID_INVALID;
+ kfree(driver->buf_in_smux);
+ driver->diag_smux_enabled = 0;
+ diag_bridge[SMUX].enabled = 0;
+ }
+ platform_driver_unregister(&msm_hsic_ch_driver);
+ platform_driver_unregister(&msm_diagfwd_smux_driver);
+ /* destroy USB MDM specific variables */
+ for (i = 0; i < MAX_BRIDGES; i++) {
+ if (diag_bridge[i].enabled) {
+#ifdef CONFIG_DIAG_OVER_USB
+ if (diag_bridge[i].usb_connected)
+ usb_diag_free_req(diag_bridge[i].ch);
+ usb_diag_close(diag_bridge[i].ch);
+#endif
+ kfree(diag_bridge[i].usb_buf_out);
+ kfree(diag_bridge[i].usb_read_ptr);
+ destroy_workqueue(diag_bridge[i].wq);
+ diag_bridge[i].enabled = 0;
+ }
+ }
+ kfree(driver->hsic_buf_tbl);
+ kfree(driver->write_ptr_mdm);
+}
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 0000000..06e6a96
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+#include "diagfwd.h"
+
+#define MAX_BRIDGES 5
+#define HSIC 0
+#define SMUX 1
+
+int diagfwd_connect_bridge(int);
+void connect_bridge(int, int);
+int diagfwd_disconnect_bridge(int);
+void diagfwd_bridge_init(int index);
+void diagfwd_bridge_exit(void);
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr);
+
+/* Diag-Bridge structure; n bridges can be used at the same time,
+ * for instance SMUX and HSIC working simultaneously.
+ */
+struct diag_bridge_dev {
+ char name[20];
+ int enabled;
+ struct mutex bridge_mutex;
+ int usb_connected;
+ int read_len;
+ int write_len;
+ unsigned char *usb_buf_out;
+ struct usb_diag_ch *ch;
+ struct workqueue_struct *wq;
+ struct work_struct diag_read_work;
+ struct diag_request *usb_read_ptr;
+ struct work_struct usb_read_complete_work;
+};
+
+#endif
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index 7aef01f..3d5eea5 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -31,6 +31,7 @@
#include "diagfwd.h"
#include "diagfwd_hsic.h"
#include "diagfwd_smux.h"
+#include "diagfwd_bridge.h"
#define READ_HSIC_BUF_SIZE 2048
@@ -72,7 +73,7 @@
write_ptrs_available--;
/*
- * No sense queuing a read if the hsic bridge was
+ * No sense queuing a read if the HSIC bridge was
* closed in another thread
*/
if (!driver->hsic_ch)
@@ -82,7 +83,7 @@
POOL_TYPE_HSIC);
if (buf_in_hsic) {
/*
- * Initiate the read from the hsic. The hsic read is
+ * Initiate the read from the HSIC. The HSIC read is
* asynchronous. Once the read is complete the read
* callback function will be called.
*/
@@ -116,7 +117,7 @@
if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
(num_reads_submitted == 0) && (err != -ENODEV) &&
(driver->hsic_ch != 0))
- queue_work(driver->diag_bridge_wq,
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
@@ -127,7 +128,7 @@
if (!driver->hsic_ch) {
/*
- * The hsic channel is closed. Return the buffer to
+ * The HSIC channel is closed. Return the buffer to
* the pool. Do not send it on.
*/
diagmem_free(driver, buf, POOL_TYPE_HSIC);
@@ -149,7 +150,7 @@
* Send data in buf to be written on the
* appropriate device, e.g. USB MDM channel
*/
- driver->write_len_mdm = actual_size;
+ diag_bridge[HSIC].write_len = actual_size;
err = diag_device_write((void *)buf, HSIC_DATA, NULL);
/* If an error, return buffer to the pool */
if (err) {
@@ -170,13 +171,13 @@
}
/*
- * If for some reason there was no hsic data to write to the
+ * If for some reason there was no HSIC data to write to the
* mdm channel, set up another read
*/
if (err &&
((driver->logging_mode == MEMORY_DEVICE_MODE) ||
- (driver->usb_mdm_connected && !driver->hsic_suspend))) {
- queue_work(driver->diag_bridge_wq,
+ (diag_bridge[HSIC].usb_connected && !driver->hsic_suspend))) {
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
}
@@ -195,8 +196,10 @@
if (actual_size < 0)
pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size);
- if (driver->usb_mdm_connected && (driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
+ if (diag_bridge[HSIC].usb_connected &&
+ (driver->logging_mode == USB_MODE))
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
}
static int diag_hsic_suspend(void *ctxt)
@@ -223,12 +226,12 @@
if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
((driver->logging_mode == MEMORY_DEVICE_MODE) ||
- (driver->usb_mdm_connected)))
- queue_work(driver->diag_bridge_wq,
+ (diag_bridge[HSIC].usb_connected)))
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
-static struct diag_bridge_ops hsic_diag_bridge_ops = {
+struct diag_bridge_ops hsic_diag_bridge_ops = {
.ctxt = NULL,
.read_complete_cb = diag_hsic_read_complete_callback,
.write_complete_cb = diag_hsic_write_complete_callback,
@@ -236,7 +239,7 @@
.resume = diag_hsic_resume,
};
-static void diag_hsic_close(void)
+void diag_hsic_close(void)
{
if (driver->hsic_device_enabled) {
driver->hsic_ch = 0;
@@ -257,7 +260,7 @@
{
int err;
- mutex_lock(&driver->bridge_mutex);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
if (driver->hsic_device_enabled) {
if (driver->hsic_device_opened) {
driver->hsic_ch = 0;
@@ -274,112 +277,7 @@
}
}
}
-
- mutex_unlock(&driver->bridge_mutex);
- return 0;
-}
-
-/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
-int diagfwd_connect_bridge(int process_cable)
-{
- int err;
-
- pr_debug("diag: in %s\n", __func__);
-
- mutex_lock(&driver->bridge_mutex);
- /* If the usb cable is being connected */
- if (process_cable) {
- err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE,
- N_MDM_READ);
- if (err)
- pr_err("diag: unable to alloc USB req on mdm"
- " ch err:%d\n", err);
-
- driver->usb_mdm_connected = 1;
- }
-
- if (driver->hsic_device_enabled) {
- driver->in_busy_hsic_read_on_device = 0;
- driver->in_busy_hsic_write = 0;
- } else if (driver->diag_smux_enabled) {
- driver->in_busy_smux = 0;
- diagfwd_connect_smux();
- mutex_unlock(&driver->bridge_mutex);
- return 0;
- }
-
- /* If the hsic (diag_bridge) platform device is not open */
- if (driver->hsic_device_enabled) {
- if (!driver->hsic_device_opened) {
- err = diag_bridge_open(&hsic_diag_bridge_ops);
- if (err) {
- pr_err("diag: HSIC channel open error: %d\n",
- err);
- } else {
- pr_debug("diag: opened HSIC channel\n");
- driver->hsic_device_opened = 1;
- }
- } else {
- pr_debug("diag: HSIC channel already open\n");
- }
-
- /*
- * Turn on communication over usb mdm and hsic, if the hsic
- * device driver is enabled and opened
- */
- if (driver->hsic_device_opened) {
- driver->hsic_ch = 1;
-
- /* Poll USB mdm channel to check for data */
- if (driver->logging_mode == USB_MODE)
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_mdm_work);
-
- /* Poll HSIC channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_hsic_work);
- }
- } else {
- /* The hsic device driver has not yet been enabled */
- pr_info("diag: HSIC channel not yet enabled\n");
- }
-
- mutex_unlock(&driver->bridge_mutex);
- return 0;
-}
-
-/*
- * diagfwd_disconnect_bridge is called when the USB mdm channel
- * is disconnected
- */
-int diagfwd_disconnect_bridge(int process_cable)
-{
- pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
-
- mutex_lock(&driver->bridge_mutex);
-
- /* If the usb cable is being disconnected */
- if (process_cable) {
- driver->usb_mdm_connected = 0;
- usb_diag_free_req(driver->mdm_ch);
- }
-
- if (driver->hsic_device_enabled &&
- driver->logging_mode != MEMORY_DEVICE_MODE) {
- driver->in_busy_hsic_read_on_device = 1;
- driver->in_busy_hsic_write = 1;
- /* Turn off communication over usb mdm and hsic */
- diag_hsic_close();
- } else if (driver->diag_smux_enabled &&
- driver->logging_mode == USB_MODE) {
- driver->in_busy_smux = 1;
- driver->lcid = LCID_INVALID;
- driver->smux_connected = 0;
- /* Turn off communication over usb mdm and smux */
- msm_smux_close(LCID_VALID);
- }
-
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return 0;
}
@@ -403,225 +301,128 @@
return 0;
}
- /* Read data from the hsic */
- queue_work(driver->diag_bridge_wq, &driver->diag_read_hsic_work);
+ /* Read data from the HSIC */
+ queue_work(diag_bridge[HSIC].wq, &driver->diag_read_hsic_work);
return 0;
}
-/* Called after the asychronous usb_diag_read() on mdm channel is complete */
-static int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+void diag_usb_read_complete_hsic_fn(struct work_struct *w)
{
- /* The read of the usb driver on the mdm (not hsic) has completed */
- driver->in_busy_hsic_read_on_device = 0;
- driver->read_len_mdm = diag_read_ptr->actual;
+ diagfwd_read_complete_bridge(diag_bridge[HSIC].usb_read_ptr);
+}
- if (driver->diag_smux_enabled) {
- diagfwd_read_complete_smux();
- return 0;
- }
- /* If SMUX not enabled, check for HSIC */
+
+void diag_read_usb_hsic_work_fn(struct work_struct *work)
+{
if (!driver->hsic_ch) {
- pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
- return 0;
- }
-
- /*
- * The read of the usb driver on the mdm channel has completed.
- * If there is no write on the hsic in progress, check if the
- * read has data to pass on to the hsic. If so, pass the usb
- * mdm data on to the hsic.
- */
- if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out &&
- (driver->read_len_mdm > 0)) {
-
- /*
- * Initiate the hsic write. The hsic write is
- * asynchronous. When complete the write
- * complete callback function will be called
- */
- int err;
- driver->in_busy_hsic_write = 1;
- err = diag_bridge_write(driver->usb_buf_mdm_out,
- driver->read_len_mdm);
- if (err) {
- pr_err_ratelimited("diag: mdm data on hsic write err: %d\n",
- err);
- /*
- * If the error is recoverable, then clear
- * the write flag, so we will resubmit a
- * write on the next frame. Otherwise, don't
- * resubmit a write on the next frame.
- */
- if ((-ENODEV) != err)
- driver->in_busy_hsic_write = 0;
- }
- }
-
- /*
- * If there is no write of the usb mdm data on the
- * hsic channel
- */
- if (!driver->in_busy_hsic_write && (driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
-
- return 0;
-}
-
-static void diagfwd_bridge_notifier(void *priv, unsigned event,
- struct diag_request *d_req)
-{
- switch (event) {
- case USB_DIAG_CONNECT:
- diagfwd_connect_bridge(1);
- break;
- case USB_DIAG_DISCONNECT:
- queue_work(driver->diag_bridge_wq,
- &driver->diag_disconnect_work);
- break;
- case USB_DIAG_READ_DONE:
- queue_work(driver->diag_bridge_wq,
- &driver->diag_usb_read_complete_work);
- break;
- case USB_DIAG_WRITE_DONE:
- if (driver->hsic_device_enabled)
- diagfwd_write_complete_hsic(d_req);
- else if (driver->diag_smux_enabled)
- diagfwd_write_complete_smux();
- break;
- default:
- pr_err("diag: in %s: Unknown event from USB diag:%u\n",
- __func__, event);
- break;
- }
-}
-
-static void diag_usb_read_complete_fn(struct work_struct *w)
-{
- diagfwd_read_complete_bridge(driver->usb_read_mdm_ptr);
-}
-
-static void diag_disconnect_work_fn(struct work_struct *w)
-{
- diagfwd_disconnect_bridge(1);
-}
-
-static void diag_read_mdm_work_fn(struct work_struct *work)
-{
- int ret;
- if (driver->diag_smux_enabled) {
- if (driver->lcid && driver->usb_buf_mdm_out &&
- (driver->read_len_mdm > 0) &&
- driver->smux_connected) {
- ret = msm_smux_write(driver->lcid, NULL,
- driver->usb_buf_mdm_out, driver->read_len_mdm);
- if (ret)
- pr_err("diag: writing to SMUX ch, r = %d,"
- "lcid = %d\n", ret, driver->lcid);
- }
- driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
- driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
- usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+ pr_err("diag: in %s: driver->hsic_ch == 0\n", __func__);
return;
}
-
- /* if SMUX not enabled, check for HSIC */
- if (!driver->hsic_ch) {
- pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
- return;
- }
-
/*
* If there is no data being read from the usb mdm channel
* and there is no mdm channel data currently being written
- * to the hsic
+ * to the HSIC
*/
if (!driver->in_busy_hsic_read_on_device &&
- !driver->in_busy_hsic_write) {
+ !driver->in_busy_hsic_write) {
APPEND_DEBUG('x');
-
/* Setup the next read from usb mdm channel */
driver->in_busy_hsic_read_on_device = 1;
- driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
- driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
- usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+ diag_bridge[HSIC].usb_read_ptr->buf =
+ diag_bridge[HSIC].usb_buf_out;
+ diag_bridge[HSIC].usb_read_ptr->length = USB_MAX_OUT_BUF;
+ diag_bridge[HSIC].usb_read_ptr->context = (void *)HSIC;
+ usb_diag_read(diag_bridge[HSIC].ch,
+ diag_bridge[HSIC].usb_read_ptr);
APPEND_DEBUG('y');
}
-
- /*
- * If for some reason there was no mdm channel read initiated,
+ /* If for some reason there was no mdm channel read initiated,
* queue up the reading of data from the mdm channel
*/
+
if (!driver->in_busy_hsic_read_on_device &&
(driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &(diag_bridge[HSIC].diag_read_work));
}
static int diag_hsic_probe(struct platform_device *pdev)
{
int err = 0;
+
pr_debug("diag: in %s\n", __func__);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
if (!driver->hsic_inited) {
+ spin_lock_init(&driver->hsic_spinlock);
+ driver->num_hsic_buf_tbl_entries = 0;
+ if (driver->hsic_buf_tbl == NULL)
+ driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES
+ * sizeof(struct diag_write_device), GFP_KERNEL);
+ if (driver->hsic_buf_tbl == NULL) {
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+ return -ENOMEM;
+ }
+ driver->count_hsic_pool = 0;
+ driver->count_hsic_write_pool = 0;
+ driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
+ driver->poolsize_hsic = N_MDM_WRITE;
+ driver->itemsize_hsic_write = sizeof(struct diag_request);
+ driver->poolsize_hsic_write = N_MDM_WRITE;
diagmem_hsic_init(driver);
INIT_WORK(&(driver->diag_read_hsic_work),
- diag_read_hsic_work_fn);
+ diag_read_hsic_work_fn);
driver->hsic_inited = 1;
}
-
- mutex_lock(&driver->bridge_mutex);
-
/*
* The probe function was called after the usb was connected
* on the legacy channel OR ODL is turned on. Communication over usb
- * mdm and hsic needs to be turned on.
+ * mdm and HSIC needs to be turned on.
*/
- if (driver->usb_mdm_connected || (driver->logging_mode ==
- MEMORY_DEVICE_MODE)) {
+ if (diag_bridge[HSIC].usb_connected || (driver->logging_mode ==
+ MEMORY_DEVICE_MODE)) {
if (driver->hsic_device_opened) {
/* should not happen. close it before re-opening */
pr_warn("diag: HSIC channel already opened in probe\n");
diag_bridge_close();
}
-
err = diag_bridge_open(&hsic_diag_bridge_ops);
if (err) {
pr_err("diag: could not open HSIC, err: %d\n", err);
driver->hsic_device_opened = 0;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return err;
}
pr_info("diag: opened HSIC channel\n");
driver->hsic_device_opened = 1;
driver->hsic_ch = 1;
-
driver->in_busy_hsic_read_on_device = 0;
driver->in_busy_hsic_write = 0;
- if (driver->usb_mdm_connected) {
+ if (diag_bridge[HSIC].usb_connected) {
/* Poll USB mdm channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_mdm_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
}
-
/* Poll HSIC channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_hsic_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &driver->diag_read_hsic_work);
}
-
- /* The hsic (diag_bridge) platform device driver is enabled */
+ /* The HSIC (diag_bridge) platform device driver is enabled */
driver->hsic_device_enabled = 1;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return err;
}
static int diag_hsic_remove(struct platform_device *pdev)
{
pr_debug("diag: %s called\n", __func__);
- mutex_lock(&driver->bridge_mutex);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
diag_hsic_close();
driver->hsic_device_enabled = 0;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+
return 0;
}
@@ -642,7 +443,7 @@
.runtime_resume = diagfwd_hsic_runtime_resume,
};
-static struct platform_driver msm_hsic_ch_driver = {
+struct platform_driver msm_hsic_ch_driver = {
.probe = diag_hsic_probe,
.remove = diag_hsic_remove,
.driver = {
@@ -651,112 +452,3 @@
.pm = &diagfwd_hsic_dev_pm_ops,
},
};
-
-void diagfwd_bridge_init(void)
-{
- int ret;
-
- pr_debug("diag: in %s\n", __func__);
- driver->diag_bridge_wq = create_singlethread_workqueue(
- "diag_bridge_wq");
- driver->read_len_mdm = 0;
- driver->write_len_mdm = 0;
- driver->num_hsic_buf_tbl_entries = 0;
- spin_lock_init(&driver->hsic_spinlock);
- if (driver->usb_buf_mdm_out == NULL)
- driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF,
- GFP_KERNEL);
- if (driver->usb_buf_mdm_out == NULL)
- goto err;
- /* Only used by smux move to smux probe function */
- if (driver->write_ptr_mdm == NULL)
- driver->write_ptr_mdm = kzalloc(
- sizeof(struct diag_request), GFP_KERNEL);
- if (driver->write_ptr_mdm == NULL)
- goto err;
- if (driver->usb_read_mdm_ptr == NULL)
- driver->usb_read_mdm_ptr = kzalloc(
- sizeof(struct diag_request), GFP_KERNEL);
- if (driver->usb_read_mdm_ptr == NULL)
- goto err;
-
- if (driver->hsic_buf_tbl == NULL)
- driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES *
- sizeof(struct diag_write_device), GFP_KERNEL);
- if (driver->hsic_buf_tbl == NULL)
- goto err;
-
- driver->count_hsic_pool = 0;
- driver->count_hsic_write_pool = 0;
-
- driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
- driver->poolsize_hsic = N_MDM_WRITE;
- driver->itemsize_hsic_write = sizeof(struct diag_request);
- driver->poolsize_hsic_write = N_MDM_WRITE;
-
- mutex_init(&driver->bridge_mutex);
-#ifdef CONFIG_DIAG_OVER_USB
- INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn);
-#endif
- INIT_WORK(&(driver->diag_disconnect_work), diag_disconnect_work_fn);
- INIT_WORK(&(driver->diag_usb_read_complete_work),
- diag_usb_read_complete_fn);
-#ifdef CONFIG_DIAG_OVER_USB
- driver->mdm_ch = usb_diag_open(DIAG_MDM, driver,
- diagfwd_bridge_notifier);
- if (IS_ERR(driver->mdm_ch)) {
- pr_err("diag: Unable to open USB diag MDM channel\n");
- goto err;
- }
-#endif
- /* register HSIC device */
- ret = platform_driver_register(&msm_hsic_ch_driver);
- if (ret)
- pr_err("diag: could not register HSIC device, ret: %d\n", ret);
- /* register SMUX device */
- ret = platform_driver_register(&msm_diagfwd_smux_driver);
- if (ret)
- pr_err("diag: could not register SMUX device, ret: %d\n", ret);
-
- return;
-err:
- pr_err("diag: Could not initialize for bridge forwarding\n");
- kfree(driver->usb_buf_mdm_out);
- kfree(driver->hsic_buf_tbl);
- kfree(driver->write_ptr_mdm);
- kfree(driver->usb_read_mdm_ptr);
- if (driver->diag_bridge_wq)
- destroy_workqueue(driver->diag_bridge_wq);
-
- return;
-}
-
-void diagfwd_bridge_exit(void)
-{
- pr_debug("diag: in %s\n", __func__);
-
- if (driver->hsic_device_enabled) {
- diag_hsic_close();
- driver->hsic_device_enabled = 0;
- }
- driver->hsic_inited = 0;
- diagmem_exit(driver, POOL_TYPE_ALL);
- if (driver->diag_smux_enabled) {
- driver->lcid = LCID_INVALID;
- kfree(driver->buf_in_smux);
- driver->diag_smux_enabled = 0;
- }
- platform_driver_unregister(&msm_hsic_ch_driver);
- platform_driver_unregister(&msm_diagfwd_smux_driver);
- /* destroy USB MDM specific variables */
-#ifdef CONFIG_DIAG_OVER_USB
- if (driver->usb_mdm_connected)
- usb_diag_free_req(driver->mdm_ch);
- usb_diag_close(driver->mdm_ch);
-#endif
- kfree(driver->usb_buf_mdm_out);
- kfree(driver->hsic_buf_tbl);
- kfree(driver->write_ptr_mdm);
- kfree(driver->usb_read_mdm_ptr);
- destroy_workqueue(driver->diag_bridge_wq);
-}
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
index 19ed3c7..2190fff 100644
--- a/drivers/char/diag/diagfwd_hsic.h
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -17,14 +17,14 @@
#define N_MDM_WRITE 8
#define N_MDM_READ 1
-
#define NUM_HSIC_BUF_TBL_ENTRIES N_MDM_WRITE
-int diagfwd_connect_bridge(int);
-int diagfwd_disconnect_bridge(int);
int diagfwd_write_complete_hsic(struct diag_request *);
int diagfwd_cancel_hsic(void);
-void diagfwd_bridge_init(void);
-void diagfwd_bridge_exit(void);
+void diag_read_usb_hsic_work_fn(struct work_struct *work);
+void diag_usb_read_complete_hsic_fn(struct work_struct *w);
+extern struct diag_bridge_ops hsic_diag_bridge_ops;
+extern struct platform_driver msm_hsic_ch_driver;
+void diag_hsic_close(void);
#endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
index ae90686..0a97baf 100644
--- a/drivers/char/diag/diagfwd_smux.c
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -18,6 +18,8 @@
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diagfwd_bridge.h"
void diag_smux_event(void *priv, int event_type, const void *metadata)
{
@@ -30,8 +32,8 @@
driver->smux_connected = 1;
driver->in_busy_smux = 0;
/* read data from USB MDM channel & Initiate first write */
- queue_work(driver->diag_bridge_wq,
- &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq,
+ &diag_bridge[SMUX].diag_read_work);
break;
case SMUX_DISCONNECTED:
driver->smux_connected = 0;
@@ -67,7 +69,7 @@
int diagfwd_read_complete_smux(void)
{
- queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq, &diag_bridge[SMUX].diag_read_work);
return 0;
}
@@ -85,6 +87,36 @@
return 0;
}
+void diag_usb_read_complete_smux_fn(struct work_struct *w)
+{
+ diagfwd_read_complete_bridge(diag_bridge[SMUX].usb_read_ptr);
+}
+
+void diag_read_usb_smux_work_fn(struct work_struct *work)
+{
+ int ret;
+
+ if (driver->diag_smux_enabled) {
+ if (driver->lcid && diag_bridge[SMUX].usb_buf_out &&
+ (diag_bridge[SMUX].read_len > 0) &&
+ driver->smux_connected) {
+ ret = msm_smux_write(driver->lcid, NULL,
+ diag_bridge[SMUX].usb_buf_out,
+ diag_bridge[SMUX].read_len);
+ if (ret)
+ pr_err("diag: writing to SMUX ch, r = %d, lcid = %d\n",
+ ret, driver->lcid);
+ }
+ diag_bridge[SMUX].usb_read_ptr->buf =
+ diag_bridge[SMUX].usb_buf_out;
+ diag_bridge[SMUX].usb_read_ptr->length = USB_MAX_OUT_BUF;
+ diag_bridge[SMUX].usb_read_ptr->context = (void *)SMUX;
+ usb_diag_read(diag_bridge[SMUX].ch,
+ diag_bridge[SMUX].usb_read_ptr);
+ return;
+ }
+}
+
static int diagfwd_smux_runtime_suspend(struct device *dev)
{
dev_dbg(dev, "pm_runtime: suspending...\n");
@@ -120,7 +152,7 @@
}
}
/* Poll USB channel to check for data*/
- queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq, &(diag_bridge[SMUX].diag_read_work));
return ret;
}
@@ -142,6 +174,11 @@
* if (ret)
* pr_err("diag: error setting SMUX ch option, r = %d\n", ret);
*/
+ if (driver->write_ptr_mdm == NULL)
+ driver->write_ptr_mdm = kzalloc(sizeof(struct diag_request),
+ GFP_KERNEL);
+ if (driver->write_ptr_mdm == NULL)
+ goto err;
ret = diagfwd_connect_smux();
return ret;
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
index e78b7ed..b45fd5d 100644
--- a/drivers/char/diag/diagfwd_smux.h
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -20,6 +20,8 @@
int diagfwd_read_complete_smux(void);
int diagfwd_write_complete_smux(void);
int diagfwd_connect_smux(void);
+void diag_usb_read_complete_smux_fn(struct work_struct *w);
+void diag_read_usb_smux_work_fn(struct work_struct *work);
extern struct platform_driver msm_diagfwd_smux_driver;
#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 1a522d5..ab1aa75 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -51,7 +51,7 @@
driver->diag_write_struct_pool, GFP_ATOMIC);
}
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC) {
if (driver->diag_hsic_pool) {
if (driver->count_hsic_pool < driver->poolsize_hsic) {
@@ -105,7 +105,7 @@
} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
printk(KERN_ALERT "Unable to destroy STRUCT mempool");
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
if (driver->diag_hsic_pool && (driver->hsic_inited == 0)) {
if (driver->count_hsic_pool == 0) {
mempool_destroy(driver->diag_hdlc_pool);
@@ -156,7 +156,7 @@
pr_err("diag: Attempt to free up DIAG driver "
"USB structure mempool which is already free %d ",
driver->count_write_struct_pool);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC) {
if (driver->diag_hsic_pool != NULL &&
driver->count_hsic_pool > 0) {
@@ -210,7 +210,7 @@
printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diagmem_hsic_init(struct diagchar_dev *driver)
{
if (driver->count_hsic_pool == 0)
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
index 8665c75..36def72f 100644
--- a/drivers/char/diag/diagmem.h
+++ b/drivers/char/diag/diagmem.h
@@ -18,7 +18,7 @@
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
void diagmem_init(struct diagchar_dev *driver);
void diagmem_exit(struct diagchar_dev *driver, int pool_type);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diagmem_hsic_init(struct diagchar_dev *driver);
#endif
#endif
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index e1e3ff5..b3843fa 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -422,6 +422,7 @@
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
+ case MDP_Y_CBCR_H1V2:
p->num_planes = 2;
p->plane_size[0] = w * h;
p->plane_size[1] = w * h;
@@ -470,8 +471,24 @@
unsigned int out_chroma_paddr)
{
int bpp;
-
- if (info->src.format != info->dst.format)
+ uint32_t dst_format;
+ switch (info->src.format) {
+ case MDP_Y_CRCB_H2V1:
+ if (info->rotations & MDP_ROT_90)
+ dst_format = MDP_Y_CRCB_H1V2;
+ else
+ dst_format = info->src.format;
+ break;
+ case MDP_Y_CBCR_H2V1:
+ if (info->rotations & MDP_ROT_90)
+ dst_format = MDP_Y_CBCR_H1V2;
+ else
+ dst_format = info->src.format;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (info->dst.format != dst_format)
return -EINVAL;
bpp = get_bpp(info->src.format);
@@ -1281,10 +1298,18 @@
is_rgb = 1;
info.dst.format = info.src.format;
break;
+ case MDP_Y_CBCR_H2V1:
+ if (info.rotations & MDP_ROT_90) {
+ info.dst.format = MDP_Y_CBCR_H1V2;
+ break;
+ }
+ case MDP_Y_CRCB_H2V1:
+ if (info.rotations & MDP_ROT_90) {
+ info.dst.format = MDP_Y_CRCB_H1V2;
+ break;
+ }
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
info.dst.format = info.src.format;
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index f3fe70f..9f96b19 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -234,6 +234,8 @@
uint32_t timestamp_event;
bool pcsave_impl;
bool pcsave_enable;
+ bool pcsave_sticky_enable;
+ bool pcsave_boot_enable;
};
static struct etm_drvdata *etmdrvdata[NR_CPUS];
@@ -1516,7 +1518,7 @@
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
-static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+static int ____etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
{
int ret = 0;
@@ -1524,7 +1526,6 @@
if (ret)
return ret;
- get_online_cpus();
spin_lock(&drvdata->spinlock);
if (val) {
if (drvdata->pcsave_enable)
@@ -1535,6 +1536,7 @@
if (ret)
goto out;
drvdata->pcsave_enable = true;
+ drvdata->pcsave_sticky_enable = true;
dev_info(drvdata->dev, "PC save enabled\n");
} else {
@@ -1551,12 +1553,22 @@
}
out:
spin_unlock(&drvdata->spinlock);
- put_online_cpus();
clk_disable_unprepare(drvdata->clk);
return ret;
}
+static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+{
+ int ret;
+
+ get_online_cpus();
+ ret = ____etm_store_pcsave(drvdata, val);
+ put_online_cpus();
+
+ return ret;
+}
+
static ssize_t etm_store_pcsave(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -1642,6 +1654,13 @@
}
break;
+ case CPU_ONLINE:
+ if (etmdrvdata[cpu] && etmdrvdata[cpu]->pcsave_boot_enable &&
+ !etmdrvdata[cpu]->pcsave_sticky_enable) {
+ ____etm_store_pcsave(etmdrvdata[cpu], 1);
+ }
+ break;
+
case CPU_DYING:
if (etmdrvdata[cpu] && etmdrvdata[cpu]->enable) {
spin_lock(&etmdrvdata[cpu]->spinlock);
@@ -1894,8 +1913,10 @@
if (boot_enable)
coresight_enable(drvdata->csdev);
- if (drvdata->pcsave_impl && boot_pcsave_enable)
- __etm_store_pcsave(drvdata, true);
+ if (drvdata->pcsave_impl && boot_pcsave_enable) {
+ __etm_store_pcsave(drvdata, 1);
+ drvdata->pcsave_boot_enable = true;
+ }
return 0;
err2:
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4361263..d605a61 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -46,7 +46,7 @@
uint32_t cnt;
};
static struct bam_registration_info bam_registry;
-
+static bool ce_bam_registered;
/*
* CE HW device structure.
* Each engine has an instance of the structure.
@@ -250,7 +250,7 @@
pce = cmdlistinfo->go_proc;
if (i == authk_size_in_word) {
- pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
pce_dev->phy_iobase);
} else {
pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
@@ -434,7 +434,7 @@
pce = cmdlistinfo->go_proc;
if (i == enck_size_in_word) {
use_hw_key = true;
- pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
pce_dev->phy_iobase);
} else {
pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
@@ -1157,15 +1157,21 @@
pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
mutex_lock(&bam_register_cnt);
+ if (ce_bam_registered == false) {
+ bam_registry.handle = 0;
+ bam_registry.cnt = 0;
+ }
if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
/* Register CE Peripheral BAM device to SPS driver */
rc = sps_register_bam_device(&bam, &bam_registry.handle);
if (rc) {
+ mutex_unlock(&bam_register_cnt);
pr_err("sps_register_bam_device() failed! err=%d", rc);
return -EIO;
}
bam_registry.cnt++;
register_bam = true;
+ ce_bam_registered = true;
} else {
bam_registry.cnt++;
}
@@ -1191,9 +1197,14 @@
sps_connect_consumer_err:
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
sps_connect_producer_err:
- if (register_bam)
+ if (register_bam) {
+ mutex_lock(&bam_register_cnt);
sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
-
+ ce_bam_registered = false;
+ bam_registry.handle = 0;
+ bam_registry.cnt = 0;
+ mutex_unlock(&bam_register_cnt);
+ }
return rc;
}
@@ -2787,21 +2798,5 @@
}
EXPORT_SYMBOL(qce_hw_support);
-static int __init qce_init(void)
-{
- bam_registry.handle = 0;
- bam_registry.cnt = 0;
- return 0;
-}
-
-static void __exit qce_exit(void)
-{
- bam_registry.handle = 0;
- bam_registry.cnt = 0;
-}
-
-module_init(qce_init);
-module_exit(qce_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 0109d26..060e89a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -98,6 +98,7 @@
.irq_name = KGSL_3D0_IRQ,
},
.iomemname = KGSL_3D0_REG_MEMORY,
+ .shadermemname = KGSL_3D0_SHADER_MEMORY,
.ftbl = &adreno_functable,
#ifdef CONFIG_HAS_EARLYSUSPEND
.display_off = {
@@ -392,13 +393,6 @@
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff;
sizedwords += 2;
- /*
- * add an interrupt at the end of commands so that the smmu
- * disable clock off function will get called
- */
- *cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
- *cmds++ = CP_INT_CNTL__RB_INT_MASK;
- sizedwords += 2;
/* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement */
adreno_ringbuffer_issuecmds(device, adreno_ctx,
@@ -1776,12 +1770,6 @@
return status;
}
-static inline void adreno_poke(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr);
-}
-
static int adreno_ringbuffer_drain(struct kgsl_device *device,
unsigned int *regs)
{
@@ -1802,12 +1790,8 @@
wait = jiffies + msecs_to_jiffies(100);
- adreno_poke(device);
-
do {
if (time_after(jiffies, wait)) {
- adreno_poke(device);
-
/* Check to see if the core is hung */
if (adreno_hang_detect(device, regs))
return -ETIMEDOUT;
@@ -2021,12 +2005,23 @@
return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
}
-void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
- unsigned int *value)
+/**
+ * adreno_read - General read function to read adreno device memory
+ * @device - Pointer to the GPU device struct (for adreno device)
+ * @base - Base address (kernel virtual) where the device memory is mapped
+ * @offsetwords - Offset in words from the base address, of the memory that
+ * is to be read
+ * @value - Value read from the device memory
+ * @mem_len - Length of the device memory mapped to the kernel
+ */
+static void adreno_read(struct kgsl_device *device, void *base,
+ unsigned int offsetwords, unsigned int *value,
+ unsigned int mem_len)
{
+
unsigned int *reg;
- BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
- reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+ BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);
+ reg = (unsigned int *)(base + (offsetwords << 2));
if (!in_interrupt())
kgsl_pre_hwaccess(device);
@@ -2037,6 +2032,31 @@
rmb();
}
+/**
+ * adreno_regread - Used to read adreno device registers
+ * @offsetwords - Word (4 Bytes) offset to the register to be read
+ * @value - Value read from device register
+ */
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int *value)
+{
+ adreno_read(device, device->reg_virt, offsetwords, value,
+ device->reg_len);
+}
+
+/**
+ * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
+ * @device - GPU device whose shader memory is to be read
+ * @offsetwords - Offset in words, of the shader memory address to be read
+ * @value - Pointer to where the read shader mem value is to be stored
+ */
+void adreno_shadermem_regread(struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int *value)
+{
+ adreno_read(device, device->shader_mem_virt, offsetwords, value,
+ device->shader_mem_len);
+}
+
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value)
{
@@ -2070,6 +2090,67 @@
return context_id;
}
+static void adreno_next_event(struct kgsl_device *device,
+ struct kgsl_event *event)
+{
+ int status;
+ unsigned int ref_ts, enableflag;
+ unsigned int context_id = _get_context_id(event->context);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ status = kgsl_check_timestamp(device, event->context, event->timestamp);
+ if (!status) {
+ kgsl_sharedmem_readl(&device->memstore, &enableflag,
+ KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
+ /*
+ * Barrier is needed here to make sure the read from memstore
+ * has posted
+ */
+
+ mb();
+
+ if (enableflag) {
+ kgsl_sharedmem_readl(&device->memstore, &ref_ts,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts));
+
+ /* Make sure the memstore read has posted */
+ mb();
+ if (timestamp_cmp(ref_ts, event->timestamp) >= 0) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), event->timestamp);
+ /* Make sure the memstore write is posted */
+ wmb();
+ }
+ } else {
+ unsigned int cmds[2];
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), event->timestamp);
+ enableflag = 1;
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ts_cmp_enable), enableflag);
+
+ /* Make sure the memstore write gets posted */
+ wmb();
+
+ /*
+ * submit a dummy packet so that even if all
+			 * commands up to timestamp get executed we will still
+ * get an interrupt
+ */
+ cmds[0] = cp_type3_packet(CP_NOP, 1);
+ cmds[1] = 0;
+
+ if (adreno_dev->drawctxt_active)
+ adreno_ringbuffer_issuecmds_intr(device,
+ event->context, &cmds[0], 2);
+ }
+ }
+}
+
static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp)
{
@@ -2167,8 +2248,24 @@
if (!adreno_dev->fast_hang_detect)
return 0;
- if (is_adreno_rbbm_status_idle(device))
+ if (is_adreno_rbbm_status_idle(device)) {
+
+ /*
+ * On A2XX if the RPTR != WPTR and the device is idle, then
+ * the last write to WPTR probably failed to latch so write it
+ * again
+ */
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ unsigned int rptr;
+ adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+ if (rptr != adreno_dev->ringbuffer.wptr)
+ adreno_regwrite(device, REG_CP_RB_WPTR,
+ adreno_dev->ringbuffer.wptr);
+ }
+
return 0;
+ }
for (i = 0; i < hang_detect_regs_count; i++) {
@@ -2186,178 +2283,225 @@
return hang_detected;
}
-
-/* MUST be called with the device mutex held */
-static int adreno_waittimestamp(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int timestamp,
- unsigned int msecs)
+/**
+ * adreno_handle_hang - Process a hang detected in adreno_waittimestamp
+ * @device - pointer to a KGSL device structure
+ * @context - pointer to the active KGSL context
+ * @timestamp - the timestamp that the process was waiting for
+ *
+ * Process a possible GPU hang and try to recover from it cleanly
+ */
+static int adreno_handle_hang(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
{
- long status = 0;
- uint io = 1;
- static uint io_cnt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
- int retries = 0;
- unsigned int ts_issued;
unsigned int context_id = _get_context_id(context);
- unsigned int time_elapsed = 0;
- unsigned int prev_reg_val[hang_detect_regs_count];
- unsigned int wait;
- unsigned int retry_ts_cmp = 0;
- unsigned int retry_ts_cmp_msecs = KGSL_SYNCOBJ_SERVER_TIMEOUT;
+ unsigned int ts_issued;
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
+ /* Do one last check to see if we somehow made it through */
+ if (kgsl_check_timestamp(device, context, timestamp))
+ return 0;
ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
- /* Don't wait forever, set a max value for now */
- if (msecs == KGSL_TIMEOUT_DEFAULT)
- msecs = adreno_dev->wait_timeout;
-
- /*
- * With user generated ts, if this check fails perform this check
- * again after 'retry_ts_cmp_msecs' milliseconds.
- */
- if (timestamp_cmp(timestamp, ts_issued) > 0) {
- if (adreno_ctx == NULL ||
- !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
- if (context && !context->wait_on_invalid_ts) {
- KGSL_DRV_ERR(device,
- "Cannot wait for invalid ts <%d:0x%x>, "
- "last issued ts <%d:0x%x>\n",
- context_id, timestamp, context_id, ts_issued);
- /*
- * Prevent the above message from spamming the
- * kernel logs and causing a watchdog
- */
- context->wait_on_invalid_ts = true;
- }
- status = -EINVAL;
- goto done;
- } else
- retry_ts_cmp = 1;
- } else if (context && context->wait_on_invalid_ts) {
- /* Once we wait for a valid ts reset the invalid wait flag */
- context->wait_on_invalid_ts = false;
- }
-
- /*
- * Make the first timeout interval 100 msecs and then try to kick the
- * wptr again. This helps to ensure the wptr is updated properly. If
- * the requested timeout is less than 100 msecs, then wait 20msecs which
- * is the minimum amount of time we can safely wait at 100HZ
- */
-
- if (msecs == 0 || msecs >= 100)
- wait = 100;
- else
- wait = 20;
-
- do {
- /*
- * If the context ID is invalid, we are in a race with
- * the context being destroyed by userspace so bail.
- */
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- status = -EINVAL;
- goto done;
- }
- if (kgsl_check_timestamp(device, context, timestamp)) {
- /* if the timestamp happens while we're not
- * waiting, there's a chance that an interrupt
- * will not be generated and thus the timestamp
- * work needs to be queued.
- */
- queue_work(device->work_queue, &device->ts_expired_ws);
- status = 0;
- goto done;
- }
- adreno_poke(device);
- io_cnt = (io_cnt + 1) % 100;
- if (io_cnt <
- pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
- io = 0;
-
- if ((retries > 0) &&
- (adreno_hang_detect(device, prev_reg_val)))
- goto hang_dump;
-
- mutex_unlock(&device->mutex);
- /* We need to make sure that the process is
- * placed in wait-q before its condition is called
- */
- status = kgsl_wait_event_interruptible_timeout(
- device->wait_queue,
- kgsl_check_interrupt_timestamp(device,
- context, timestamp),
- msecs_to_jiffies(wait), io);
-
- mutex_lock(&device->mutex);
-
- if (status > 0) {
- /*completed before the wait finished */
- status = 0;
- goto done;
- } else if (status < 0) {
- /*an error occurred*/
- goto done;
- }
- /*this wait timed out*/
-
- time_elapsed += wait;
- wait = KGSL_TIMEOUT_PART;
-
- if (!retry_ts_cmp)
- retries++;
- else if (time_elapsed >= retry_ts_cmp_msecs) {
- ts_issued =
- adreno_dev->ringbuffer.timestamp[context_id];
- if (timestamp_cmp(timestamp, ts_issued) > 0) {
- if (context && !context->wait_on_invalid_ts) {
- KGSL_DRV_ERR(device,
- "Cannot wait for user-generated ts <%d:0x%x>, "
- "not submitted within server timeout period. "
- "last issued ts <%d:0x%x>\n",
- context_id, timestamp, context_id,
- ts_issued);
- context->wait_on_invalid_ts = true;
- }
- status = -EINVAL;
- goto done;
- } else if (context && context->wait_on_invalid_ts) {
- context->wait_on_invalid_ts = false;
- }
- retry_ts_cmp = 0;
- }
-
- } while (!msecs || time_elapsed < msecs);
-
-hang_dump:
- /*
- * Check if timestamp has retired here because we may have hit
- * recovery which can take some time and cause waiting threads
- * to timeout
- */
- if (kgsl_check_timestamp(device, context, timestamp))
- goto done;
- status = -ETIMEDOUT;
KGSL_DRV_ERR(device,
"Device hang detected while waiting for timestamp: "
"<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
"wptr: 0x%x\n",
context_id, timestamp, context_id, ts_issued,
adreno_dev->ringbuffer.wptr);
- if (!adreno_dump_and_recover(device)) {
- /* The timestamp that this process wanted
- * to wait on may be invalid or expired now
- * after successful recovery */
- status = 0;
+
+ /* Return 0 after a successful recovery */
+ if (!adreno_dump_and_recover(device))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int _check_pending_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int context_id = _get_context_id(context);
+ unsigned int ts_issued;
+
+ if (context_id == KGSL_CONTEXT_INVALID)
+ return -EINVAL;
+
+ ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
+
+ if (timestamp_cmp(timestamp, ts_issued) <= 0)
+ return 0;
+
+ if (context && !context->wait_on_invalid_ts) {
+ KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
+ context_id, timestamp, context_id, ts_issued);
+
+ /* Only print this message once */
+ context->wait_on_invalid_ts = true;
}
-done:
- return (int)status;
+
+ return -EINVAL;
+}
+
+/**
+ * adreno_waittimestamp - sleep while waiting for the specified timestamp
+ * @device - pointer to a KGSL device structure
+ * @context - pointer to the active kgsl context
+ * @timestamp - GPU timestamp to wait for
+ * @msecs - amount of time to wait (in milliseconds)
+ *
+ * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
+ * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
+ * one if it happened. Otherwise, spend most of our time in an interruptible
+ * wait for the timestamp interrupt to be processed. This function must be
+ * called with the mutex already held.
+ */
+static int adreno_waittimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ static unsigned int io_cnt;
+ struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int context_id = _get_context_id(context);
+ unsigned int prev_reg_val[hang_detect_regs_count];
+ unsigned int time_elapsed = 0;
+ unsigned int wait;
+ int ts_compare = 1;
+ int io, ret = -ETIMEDOUT;
+
+ /* Get out early if the context has already been destroyed */
+
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ return -EINVAL;
+ }
+
+ /*
+ * Check to see if the requested timestamp is "newer" then the last
+ * timestamp issued. If it is, complain once and return an error. Only
+ * print the message once per context so that badly behaving
+ * applications don't spam the logs
+ */
+
+ if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
+ if (_check_pending_timestamp(device, context, timestamp))
+ return -EINVAL;
+
+ /* Reset the invalid timestamp flag on a valid wait */
+ context->wait_on_invalid_ts = false;
+ }
+
+
+ /* Clear the registers used for hang detection */
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
+ /*
+ * On the first time through the loop only wait 100ms.
+	 * This gives enough time for the engine to start moving and oddly
+ * provides better hang detection results than just going the full
+ * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
+ * is if msecs happens to be < 100ms then just use the full timeout
+ */
+
+ wait = 100;
+
+ do {
+ long status;
+
+ if (wait > (msecs - time_elapsed))
+ wait = msecs - time_elapsed;
+
+ /*
+ * if the timestamp happens while we're not
+ * waiting, there's a chance that an interrupt
+ * will not be generated and thus the timestamp
+ * work needs to be queued.
+ */
+
+ if (kgsl_check_timestamp(device, context, timestamp)) {
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ ret = 0;
+ break;
+ }
+
+ /* Check to see if the GPU is hung */
+ if (adreno_hang_detect(device, prev_reg_val)) {
+ ret = adreno_handle_hang(device, context, timestamp);
+ break;
+ }
+
+ /*
+ * For proper power accounting sometimes we need to call
+ * io_wait_interruptible_timeout and sometimes we need to call
+ * plain old wait_interruptible_timeout. We call the regular
+ * timeout N times out of 100, where N is a number specified by
+ * the current power level
+ */
+
+ io_cnt = (io_cnt + 1) % 100;
+ io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ ? 0 : 1;
+
+ mutex_unlock(&device->mutex);
+
+ /* Wait for a timestamp event */
+ status = kgsl_wait_event_interruptible_timeout(
+ device->wait_queue,
+ kgsl_check_interrupt_timestamp(device, context,
+ timestamp), msecs_to_jiffies(wait), io);
+
+ mutex_lock(&device->mutex);
+
+ /*
+ * If status is non zero then either the condition was satisfied
+ * or there was an error. In either event, this is the end of
+ * the line for us
+ */
+
+ if (status != 0) {
+ ret = (status > 0) ? 0 : (int) status;
+ break;
+ }
+
+ time_elapsed += wait;
+
+ /* If user specified timestamps are being used, wait at least
+ * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
+ * issue a IB for a timestamp before checking to see if the
+ * current timestamp we are waiting for is valid or not
+ */
+
+ if (ts_compare && (adreno_ctx &&
+ (adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
+ if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
+ ret = _check_pending_timestamp(device, context,
+ timestamp);
+ if (ret)
+ break;
+
+ /* Don't do this check again */
+ ts_compare = 0;
+
+ /*
+ * Reset the invalid timestamp flag on a valid
+ * wait
+ */
+ context->wait_on_invalid_ts = false;
+ }
+ }
+
+ /*
+ * all subsequent trips through the loop wait the full
+ * KGSL_TIMEOUT_PART interval
+ */
+ wait = KGSL_TIMEOUT_PART;
+
+ } while (!msecs || time_elapsed < msecs);
+
+ return ret;
}
static unsigned int adreno_readtimestamp(struct kgsl_device *device,
@@ -2516,6 +2660,7 @@
.drawctxt_destroy = adreno_drawctxt_destroy,
.setproperty = adreno_setproperty,
.postmortem_dump = adreno_dump,
+ .next_event = adreno_next_event,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index f9d0316..61378fe 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -181,6 +181,10 @@
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
+void adreno_shadermem_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value);
+
int adreno_dump(struct kgsl_device *device, int manual);
unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
*adreno_dev);
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index e4f5733..1243dd0 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/io.h>
#include "kgsl.h"
#include "adreno.h"
#include "kgsl_snapshot.h"
@@ -19,14 +20,27 @@
#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
+ sizeof(struct kgsl_snapshot_debug))
+/* Shader memory size in words */
#define SHADER_MEMORY_SIZE 0x4000
+/**
+ * a3xx_snapshot_shader_memory - Helper function to dump the GPU shader
+ * memory to the snapshot buffer.
+ * @device - GPU device whose shader memory is to be dumped
+ * @snapshot - Pointer to binary snapshot data blob being made
+ * @remain - Number of remaining bytes in the snapshot blob
+ * @priv - Unused parameter
+ */
static int a3xx_snapshot_shader_memory(struct kgsl_device *device,
void *snapshot, int remain, void *priv)
{
struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int i;
unsigned int *data = snapshot + sizeof(*header);
- int i;
+ unsigned int shader_read_len = SHADER_MEMORY_SIZE;
+
+ if (SHADER_MEMORY_SIZE > (device->shader_mem_len >> 2))
+ shader_read_len = (device->shader_mem_len >> 2);
if (remain < DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE)) {
SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
@@ -36,8 +50,22 @@
header->type = SNAPSHOT_DEBUG_SHADER_MEMORY;
header->size = SHADER_MEMORY_SIZE;
- for (i = 0; i < SHADER_MEMORY_SIZE; i++)
- adreno_regread(device, 0x4000 + i, &data[i]);
+ /* Map shader memory to kernel, for dumping */
+ if (device->shader_mem_virt == NULL)
+ device->shader_mem_virt = devm_ioremap(device->dev,
+ device->shader_mem_phys,
+ device->shader_mem_len);
+
+ if (device->shader_mem_virt == NULL) {
+ KGSL_DRV_ERR(device,
+ "Unable to map shader memory region\n");
+ return 0;
+ }
+
+ /* Now, dump shader memory to snapshot */
+ for (i = 0; i < shader_read_len; i++)
+ adreno_shadermem_regread(device, i, &data[i]);
+
return DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE);
}
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index e069fa5..6e0d6ad 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -12,6 +12,7 @@
*/
#include <linux/vmalloc.h>
+#include <mach/board.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
@@ -737,6 +738,8 @@
mb();
+ msm_clk_dump_debug_info();
+
if (adreno_is_a2xx(adreno_dev))
adreno_dump_a2xx(device);
else if (adreno_is_a3xx(adreno_dev))
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 9648f27..27343c5 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -555,6 +555,9 @@
total_sizedwords += 4; /* global timestamp for recovery*/
}
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
if (!ringcmds) {
/*
@@ -672,6 +675,12 @@
rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
}
+ if (adreno_is_a20x(adreno_dev)) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH);
+ }
+
if (context) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index c040bf3..e61b040 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -504,6 +504,22 @@
kfree(event);
}
+ /* Send the next pending event for each context to the device */
+ if (device->ftbl->next_event) {
+ unsigned int id = KGSL_MEMSTORE_GLOBAL;
+
+ list_for_each_entry(event, &device->events, list) {
+
+ if (!event->context)
+ continue;
+
+ if (event->context->id != id) {
+ device->ftbl->next_event(device, event);
+ id = event->context->id;
+ }
+ }
+ }
+
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_timestamp_expired);
@@ -2475,6 +2491,7 @@
kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
+ /* Get starting physical address of device registers */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
device->iomemname);
if (res == NULL) {
@@ -2492,6 +2509,33 @@
device->reg_phys = res->start;
device->reg_len = resource_size(res);
+ /*
+ * Check if a shadermemname is defined, and then get shader memory
+ * details including shader memory starting physical address
+ * and shader memory length
+ */
+ if (device->shadermemname != NULL) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ device->shadermemname);
+
+ if (res == NULL) {
+ KGSL_DRV_ERR(device,
+ "Shader memory: platform_get_resource_byname failed\n");
+ }
+
+ else {
+ device->shader_mem_phys = res->start;
+ device->shader_mem_len = resource_size(res);
+ }
+
+ if (!devm_request_mem_region(device->dev,
+ device->shader_mem_phys,
+ device->shader_mem_len,
+ device->name)) {
+ KGSL_DRV_ERR(device, "request_mem_region_failed\n");
+ }
+ }
+
if (!devm_request_mem_region(device->dev, device->reg_phys,
device->reg_len, device->name)) {
KGSL_DRV_ERR(device, "request_mem_region failed\n");
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d962bf1..35ffc1b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -58,6 +58,7 @@
struct kgsl_device_private;
struct kgsl_context;
struct kgsl_power_stats;
+struct kgsl_event;
struct kgsl_functable {
/* Mandatory functions - these functions must be implemented
@@ -112,6 +113,8 @@
enum kgsl_property_type type, void *value,
unsigned int sizebytes);
int (*postmortem_dump) (struct kgsl_device *device, int manual);
+ void (*next_event)(struct kgsl_device *device,
+ struct kgsl_event *event);
};
/* MH register values */
@@ -140,11 +143,27 @@
unsigned int ver_minor;
uint32_t flags;
enum kgsl_deviceid id;
+
+ /* Starting physical address for GPU registers */
unsigned long reg_phys;
+
+ /* Starting Kernel virtual address for GPU registers */
void *reg_virt;
+
+ /* Total memory size for all GPU registers */
unsigned int reg_len;
+
+ /* Kernel virtual address for GPU shader memory */
+ void *shader_mem_virt;
+
+ /* Starting physical address for GPU shader memory */
+ unsigned long shader_mem_phys;
+
+ /* GPU shader memory size */
+ unsigned int shader_mem_len;
struct kgsl_memdesc memstore;
const char *iomemname;
+ const char *shadermemname;
struct kgsl_mh mh;
struct kgsl_mmu mmu;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 6fe119d..bf39587 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -25,6 +25,7 @@
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
+#include "adreno.h"
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
@@ -536,6 +537,12 @@
uint32_t flags)
{
struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
+ && !adreno_is_a2xx(adreno_dev))
+ return;
+
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return;
else if (device->ftbl->setstate)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7a7a8dc..27f198b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -86,63 +86,296 @@
clkstats->start = ktime_get();
}
+/*
+ * Given a requested power level do bounds checking on the constraints and
+ * return the nearest possible level
+ */
+
+static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
+{
+	/* int, not unsigned: comparing a signed 'level' against unsigned
+	 * locals would promote a negative level to a huge value and skip
+	 * the max clamp entirely.
+	 */
+	int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
+		pwr->max_pwrlevel);
+	int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
+		pwr->min_pwrlevel);
+
+	if (level < max_pwrlevel)
+		return max_pwrlevel;
+	if (level > min_pwrlevel)
+		return min_pwrlevel;
+
+	return level;
+}
+
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- if (new_level < (pwr->num_pwrlevels - 1) &&
- new_level >= pwr->thermal_pwrlevel &&
- new_level != pwr->active_pwrlevel) {
- struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
- int diff = new_level - pwr->active_pwrlevel;
- int d = (diff > 0) ? 1 : -1;
- int level = pwr->active_pwrlevel;
- /* Update the clock stats */
- update_clk_statistics(device, true);
- /* Finally set active level */
- pwr->active_pwrlevel = new_level;
- if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
- (device->state == KGSL_STATE_NAP)) {
- /*
- * On some platforms, instability is caused on
- * changing clock freq when the core is busy.
- * Idle the gpu core before changing the clock freq.
- */
- if (pwr->idle_needed == true)
- device->ftbl->idle(device);
+ struct kgsl_pwrlevel *pwrlevel;
+ int delta;
- /* Don't shift by more than one level at a time to
- * avoid glitches.
- */
- while (level != new_level) {
- level += d;
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[level].gpu_freq);
- }
+ /* Adjust the power level to the current constraints */
+ new_level = _adjust_pwrlevel(pwr, new_level);
+
+ if (new_level == pwr->active_pwrlevel)
+ return;
+
+ delta = new_level < pwr->active_pwrlevel ? -1 : 1;
+
+ update_clk_statistics(device, true);
+
+ if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
+ (device->state == KGSL_STATE_NAP)) {
+
+ /*
+ * On some platforms, instability is caused on
+ * changing clock freq when the core is busy.
+ * Idle the gpu core before changing the clock freq.
+ */
+
+ if (pwr->idle_needed == true)
+ device->ftbl->idle(device);
+
+ /*
+ * Don't shift by more than one level at a time to
+ * avoid glitches.
+ */
+
+ while (pwr->active_pwrlevel != new_level) {
+ pwr->active_pwrlevel += delta;
+
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
- if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
- if (pwr->pcl)
- msm_bus_scale_client_update_request(pwr->pcl,
- pwrlevel->bus_freq);
- else if (pwr->ebi1_clk)
- clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
- }
- trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
- pwrlevel->gpu_freq);
}
+
+ pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
+
+ if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
+
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ pwrlevel->bus_freq);
+ else if (pwr->ebi1_clk)
+ clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
+ }
+
+ trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}
+
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
-static int __gpuclk_store(int max, struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{ int ret, i, delta = 5000000;
- unsigned long val;
+static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
+ int ret, level;
if (device == NULL)
return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+
+ if (level > pwr->num_pwrlevels - 2)
+ level = pwr->num_pwrlevels - 2;
+
+ pwr->thermal_pwrlevel = level;
+
+ /*
+ * If there is no power policy set the clock to the requested thermal
+ * level - if thermal now happens to be higher than max, then that will
+ * be limited by the pwrlevel change function. Otherwise if there is
+ * a policy only change the active clock if it is higher then the new
+ * thermal level
+ */
+
+ if (device->pwrscale.policy == NULL ||
+ pwr->thermal_pwrlevel > pwr->active_pwrlevel)
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
+}
+
+static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, level, max_level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ /* If the use specifies a negative number, then don't change anything */
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+
+ /* You can't set a maximum power level lower than the minimum */
+ if (level > pwr->min_pwrlevel)
+ level = pwr->min_pwrlevel;
+
+ pwr->max_pwrlevel = level;
+
+
+ max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
+
+ /*
+ * If there is no policy then move to max by default. Otherwise only
+ * move max if the current level happens to be higher then the new max
+ */
+
+ if (device->pwrscale.policy == NULL ||
+ (max_level > pwr->active_pwrlevel))
+ kgsl_pwrctrl_pwrlevel_change(device, max_level);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
+}
+
+static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, level, min_level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ /* Don't do anything on obviously incorrect values */
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+ if (level > pwr->num_pwrlevels - 2)
+ level = pwr->num_pwrlevels - 2;
+
+ /* You can't set a minimum power level lower than the maximum */
+ if (level < pwr->max_pwrlevel)
+ level = pwr->max_pwrlevel;
+
+ pwr->min_pwrlevel = level;
+
+ min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
+
+ /* Only move the power level higher if minimum is higher then the
+ * current level
+ */
+
+ if (min_level < pwr->active_pwrlevel)
+ kgsl_pwrctrl_pwrlevel_change(device, min_level);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
+}
+
+static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
+}
+
+/* Given a GPU clock value, return the nearest powerlevel */
+
+static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
+{
+ int i;
+
+ for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
+ if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
+ return i;
+ }
+
+ return -ERANGE;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ unsigned long val;
+ int ret, level;
+
+ if (device == NULL)
+ return 0;
+
pwr = &device->pwrctrl;
ret = sscanf(buf, "%ld", &val);
@@ -150,44 +383,30 @@
return count;
mutex_lock(&device->mutex);
- for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
- if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
- if (max)
- pwr->thermal_pwrlevel = i;
- break;
- }
- }
-
- if (i == (pwr->num_pwrlevels - 1))
+ level = _get_nearest_pwrlevel(pwr, val);
+ if (level < 0)
goto done;
+ pwr->thermal_pwrlevel = level;
+
/*
- * If the current or requested clock speed is greater than the
- * thermal limit, bump down immediately.
+ * if the thermal limit is lower than the current setting,
+ * move the speed down immediately
*/
- if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
- pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
+ if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
- else if (!max || (NULL == device->pwrscale.policy))
- kgsl_pwrctrl_pwrlevel_change(device, i);
done:
mutex_unlock(&device->mutex);
return count;
}
-static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return __gpuclk_store(1, dev, attr, buf, count);
-}
-
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
@@ -201,7 +420,27 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- return __gpuclk_store(0, dev, attr, buf, count);
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ unsigned long val;
+ int ret, level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%ld", &val);
+ if (ret != 1)
+ return count;
+
+ mutex_lock(&device->mutex);
+ level = _get_nearest_pwrlevel(pwr, val);
+ if (level >= 0)
+ kgsl_pwrctrl_pwrlevel_change(device, level);
+
+ mutex_unlock(&device->mutex);
+ return count;
}
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
@@ -382,6 +621,18 @@
DEVICE_ATTR(gpu_available_frequencies, 0444,
kgsl_pwrctrl_gpu_available_frequencies_show,
NULL);
+DEVICE_ATTR(max_pwrlevel, 0644,
+ kgsl_pwrctrl_max_pwrlevel_show,
+ kgsl_pwrctrl_max_pwrlevel_store);
+DEVICE_ATTR(min_pwrlevel, 0644,
+ kgsl_pwrctrl_min_pwrlevel_show,
+ kgsl_pwrctrl_min_pwrlevel_store);
+DEVICE_ATTR(thermal_pwrlevel, 0644,
+ kgsl_pwrctrl_thermal_pwrlevel_show,
+ kgsl_pwrctrl_thermal_pwrlevel_store);
+DEVICE_ATTR(num_pwrlevels, 0444,
+ kgsl_pwrctrl_num_pwrlevels_show,
+ NULL);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -391,6 +642,10 @@
&dev_attr_gpubusy,
&dev_attr_gputop,
&dev_attr_gpu_available_frequencies,
+ &dev_attr_max_pwrlevel,
+ &dev_attr_min_pwrlevel,
+ &dev_attr_thermal_pwrlevel,
+ &dev_attr_num_pwrlevels,
NULL
};
@@ -623,6 +878,13 @@
goto done;
}
pwr->num_pwrlevels = pdata->num_levels;
+
+ /* Initialize the user and thermal clock constraints */
+
+ pwr->max_pwrlevel = 0;
+ pwr->min_pwrlevel = pdata->num_levels - 2;
+ pwr->thermal_pwrlevel = 0;
+
pwr->active_pwrlevel = pdata->init_level;
pwr->default_pwrlevel = pdata->init_level;
for (i = 0; i < pdata->num_levels; i++) {
@@ -885,6 +1147,9 @@
kgsl_pwrstate_to_str(device->state));
break;
}
+
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index c02a9fc..e51ec54 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -38,6 +38,30 @@
unsigned int elapsed_old;
};
+/**
+ * struct kgsl_pwrctrl - Power control settings for a KGSL device
+ * @interrupt_num - The interrupt number for the device
+ * @ebi1_clk - Pointer to the EBI clock structure
+ * @grp_clks - Array of clocks structures that we control
+ * @power_flags - Control flags for power
+ * @pwrlevels - List of supported power levels
+ * @active_pwrlevel - The currently active power level
+ * @thermal_pwrlevel - maximum powerlevel constraint from thermal
+ * @max_pwrlevel - maximum allowable powerlevel per the user
+ * @min_pwrlevel - minimum allowable powerlevel per the user
+ * @num_pwrlevels - number of available power levels
+ * @interval_timeout - timeout in jiffies to be idle before a power event
+ * @strtstp_sleepwake - true if the device supports low latency GPU start/stop
+ * @gpu_reg - pointer to the regulator structure for gpu_reg
+ * @gpu_cx - pointer to the regulator structure for gpu_cx
+ * @pcl - bus scale identifier
+ * @nap_allowed - true if the device supports naps
+ * @idle_needed - true if the device needs a idle before clock change
+ * @irq_name - resource name for the IRQ
+ * @restore_slumber - Flag to indicate that we are in a suspend/restore sequence
+ * @clk_stats - structure of clock statistics
+ */
+
struct kgsl_pwrctrl {
int interrupt_num;
struct clk *ebi1_clk;
@@ -47,6 +71,8 @@
unsigned int active_pwrlevel;
int thermal_pwrlevel;
unsigned int default_pwrlevel;
+ unsigned int max_pwrlevel;
+ unsigned int min_pwrlevel;
unsigned int num_pwrlevels;
unsigned int interval_timeout;
bool strtstp_sleepwake;
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index f6277b3..aad1a8d 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -299,8 +299,14 @@
{
if (device->pwrscale.policy != NULL) {
device->pwrscale.policy->close(device, &device->pwrscale);
+
+ /*
+ * Try to set max pwrlevel which will be limited to thermal by
+ * kgsl_pwrctrl_pwrlevel_change if thermal is indeed lower
+ */
+
kgsl_pwrctrl_pwrlevel_change(device,
- device->pwrctrl.thermal_pwrlevel);
+ device->pwrctrl.max_pwrlevel);
}
device->pwrscale.policy = NULL;
}
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index 7c2514b..e01932b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -93,7 +93,7 @@
priv->governor = TZ_GOVERNOR_PERFORMANCE;
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
- kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
mutex_unlock(&device->mutex);
return count;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index a384103..5fb041d 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -299,81 +299,83 @@
{419, 128000}
};
+/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {-40, 1758},
- {-35, 1742},
- {-30, 1719},
- {-25, 1691},
- {-20, 1654},
- {-15, 1608},
- {-10, 1551},
- {-5, 1483},
- {0, 1404},
- {5, 1315},
- {10, 1218},
- {15, 1114},
- {20, 1007},
- {25, 900},
- {30, 795},
- {35, 696},
- {40, 605},
- {45, 522},
- {50, 448},
- {55, 383},
- {60, 327},
- {65, 278},
- {70, 237},
- {75, 202},
- {80, 172},
- {85, 146},
- {90, 125},
- {95, 107},
- {100, 92},
- {105, 79},
- {110, 68},
- {115, 59},
- {120, 51},
- {125, 44}
+ {1758, -40},
+ {1742, -35},
+ {1719, -30},
+ {1691, -25},
+ {1654, -20},
+ {1608, -15},
+ {1551, -10},
+ {1483, -5},
+ {1404, 0},
+ {1315, 5},
+ {1218, 10},
+ {1114, 15},
+ {1007, 20},
+ {900, 25},
+ {795, 30},
+ {696, 35},
+ {605, 40},
+ {522, 45},
+ {448, 50},
+ {383, 55},
+ {327, 60},
+ {278, 65},
+ {237, 70},
+ {202, 75},
+ {172, 80},
+ {146, 85},
+ {125, 90},
+ {107, 95},
+ {92, 100},
+ {79, 105},
+ {68, 110},
+ {59, 115},
+ {51, 120},
+ {44, 125}
};
+/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
- {-40, 1738},
- {-35, 1714},
- {-30, 1682},
- {-25, 1641},
- {-20, 1589},
- {-15, 1526},
- {-10, 1451},
- {-5, 1363},
- {0, 1266},
- {5, 1159},
- {10, 1048},
- {15, 936},
- {20, 825},
- {25, 720},
- {30, 622},
- {35, 533},
- {40, 454},
- {45, 385},
- {50, 326},
- {55, 275},
- {60, 232},
- {65, 195},
- {70, 165},
- {75, 139},
- {80, 118},
- {85, 100},
- {90, 85},
- {95, 73},
- {100, 62},
- {105, 53},
- {110, 46},
- {115, 40},
- {120, 34},
- {125, 30}
+ {1738, -40},
+ {1714, -35},
+ {1682, -30},
+ {1641, -25},
+ {1589, -20},
+ {1526, -15},
+ {1451, -10},
+ {1363, -5},
+ {1266, 0},
+ {1159, 5},
+ {1048, 10},
+ {936, 15},
+ {825, 20},
+ {720, 25},
+ {622, 30},
+ {533, 35},
+ {454, 40},
+ {385, 45},
+ {326, 50},
+ {275, 55},
+ {232, 60},
+ {195, 65},
+ {165, 70},
+ {139, 75},
+ {118, 80},
+ {100, 85},
+ {85, 90},
+ {73, 95},
+ {62, 100},
+ {53, 105},
+ {46, 110},
+ {40, 115},
+ {34, 120},
+ {30, 125}
};
-static int32_t qpnp_adc_map_linear(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
uint32_t tablesize, int32_t input, int64_t *output)
{
bool descending = 1;
@@ -419,7 +421,7 @@
return 0;
}
-static int32_t qpnp_adc_map_batt_therm(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
uint32_t tablesize, int32_t input, int64_t *output)
{
bool descending = 1;
@@ -552,7 +554,7 @@
xo_thm = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
xo_thm <<= 4;
- qpnp_adc_map_linear(adcmap_ntcg_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_ntcg_104ef_104fb,
ARRAY_SIZE(adcmap_ntcg_104ef_104fb),
xo_thm, &adc_chan_result->physical);
@@ -570,7 +572,7 @@
bat_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- return qpnp_adc_map_batt_therm(
+ return qpnp_adc_map_temp_voltage(
adcmap_btm_threshold,
ARRAY_SIZE(adcmap_btm_threshold),
bat_voltage,
@@ -588,7 +590,7 @@
therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- qpnp_adc_map_linear(adcmap_150k_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
ARRAY_SIZE(adcmap_150k_104ef_104fb),
therm_voltage, &adc_chan_result->physical);
@@ -606,7 +608,7 @@
therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- qpnp_adc_map_linear(adcmap_100k_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
ARRAY_SIZE(adcmap_100k_104ef_104fb),
therm_voltage, &adc_chan_result->physical);
@@ -614,6 +616,63 @@
}
EXPORT_SYMBOL_GPL(qpnp_adc_scale_therm_pu2);
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(uint32_t reg, int64_t *result)
+{
+	int64_t adc_voltage = 0;
+	struct qpnp_vadc_linear_graph param1;
+	int negative_offset = 0;
+
+	qpnp_get_vadc_gain_and_offset(&param1, CALIB_RATIOMETRIC);
+
+	adc_voltage = (reg - param1.adc_gnd) * param1.adc_vref;
+	if (adc_voltage < 0) {
+		negative_offset = 1;
+		adc_voltage = -adc_voltage;
+	}
+
+	do_div(adc_voltage, param1.dy);
+
+	/* Restore the sign before mapping, not after, so the lookup
+	 * sees the true voltage */
+	if (negative_offset)
+		adc_voltage = -adc_voltage;
+	qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+		ARRAY_SIZE(adcmap_100k_104ef_104fb),
+		adc_voltage, result);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_tm_scale_voltage_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_tm_config *param)
+{
+ struct qpnp_vadc_linear_graph param1;
+ int rc;
+
+ qpnp_get_vadc_gain_and_offset(¶m1, CALIB_RATIOMETRIC);
+
+ rc = qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ param->low_thr_temp, ¶m->low_thr_voltage);
+ if (rc)
+ return rc;
+
+ param->low_thr_voltage *= param1.dy;
+ do_div(param->low_thr_voltage, param1.adc_vref);
+ param->low_thr_voltage += param1.adc_gnd;
+
+ rc = qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ param->high_thr_temp, ¶m->high_thr_voltage);
+ if (rc)
+ return rc;
+
+ param->high_thr_voltage *= param1.dy;
+ do_div(param->high_thr_voltage, param1.adc_vref);
+ param->high_thr_voltage += param1.adc_gnd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_tm_scale_therm_voltage_pu2);
+
int32_t qpnp_adc_scale_batt_id(int32_t adc_code,
const struct qpnp_adc_properties *adc_properties,
const struct qpnp_vadc_chan_properties *chan_properties,
@@ -687,6 +746,65 @@
}
EXPORT_SYMBOL_GPL(qpnp_adc_scale_default);
+int32_t qpnp_adc_usb_scaler(struct qpnp_adc_tm_usbid_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph usb_param;
+
+ qpnp_get_vadc_gain_and_offset(&usb_param, CALIB_ABSOLUTE);
+
+ *low_threshold = param->low_thr * usb_param.dy;
+ do_div(*low_threshold, usb_param.adc_vref);
+ *low_threshold += usb_param.adc_gnd;
+
+ *high_threshold = param->high_thr * usb_param.dy;
+ do_div(*high_threshold, usb_param.adc_vref);
+ *high_threshold += usb_param.adc_gnd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_usb_scaler);
+
+int32_t qpnp_adc_btm_scaler(struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	/* stack values, not NULL pointers - the map helper writes here */
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	qpnp_get_vadc_gain_and_offset(&btm_param, CALIB_RATIOMETRIC);
+
+	rc = qpnp_adc_map_temp_voltage(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->low_temp),
+		&low_output);
+	if (rc)
+		return rc;
+
+	low_output *= btm_param.dy;
+	do_div(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_temp_voltage(
+		adcmap_btm_threshold,
+		ARRAY_SIZE(adcmap_btm_threshold),
+		(param->high_temp),
+		&high_output);
+	if (rc)
+		return rc;
+
+	high_output *= btm_param.dy;
+	do_div(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* write results through the caller's pointers */
+	*low_threshold = (uint32_t) low_output;
+	*high_threshold = (uint32_t) high_output;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_btm_scaler);
+
int32_t qpnp_vadc_check_result(int32_t *data)
{
if (*data < QPNP_VADC_MIN_ADC_CODE)
@@ -729,7 +847,7 @@
return -ENOMEM;
}
adc_channel_list = devm_kzalloc(&spmi->dev,
- sizeof(struct qpnp_vadc_amux) * count_adc_channel_list,
+ ((sizeof(struct qpnp_vadc_amux)) * count_adc_channel_list),
GFP_KERNEL);
if (!adc_channel_list) {
dev_err(&spmi->dev, "Unable to allocate memory\n");
@@ -842,8 +960,9 @@
adc_qpnp->offset = res->start;
/* Register the ADC peripheral interrupt */
- adc_qpnp->adc_irq = spmi_get_irq(spmi, 0, 0);
- if (adc_qpnp->adc_irq < 0) {
+ adc_qpnp->adc_irq_eoc = spmi_get_irq_byname(spmi, NULL,
+ "eoc-int-en-set");
+ if (adc_qpnp->adc_irq_eoc < 0) {
pr_err("Invalid irq\n");
return -ENXIO;
}
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 0e82cf7..b5ee104 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -695,14 +695,14 @@
return -EINVAL;
}
- rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq,
+ rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq_eoc,
qpnp_iadc_isr,
IRQF_TRIGGER_RISING, "qpnp_iadc_interrupt", iadc);
if (rc) {
dev_err(&spmi->dev, "failed to request adc irq\n");
return rc;
} else
- enable_irq_wake(iadc->adc->adc_irq);
+ enable_irq_wake(iadc->adc->adc_irq_eoc);
iadc->iadc_init_calib = false;
dev_set_drvdata(&spmi->dev, iadc);
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index c59aa5b..b71c998 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -509,6 +509,39 @@
return rc;
}
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type)
+{
+
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+
+ switch (calib_type) {
+ case CALIB_RATIOMETRIC:
+ param->dy =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy;
+ param->dx =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx;
+ param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+ param->adc_gnd =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd;
+ break;
+ case CALIB_ABSOLUTE:
+ param->dy =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy;
+ param->dx =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx;
+ param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+ param->adc_gnd =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_get_vadc_gain_and_offset);
+
int32_t qpnp_vadc_is_ready(void)
{
struct qpnp_vadc_drv *vadc = qpnp_vadc;
@@ -743,7 +776,7 @@
return rc;
}
- rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq,
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq_eoc,
qpnp_vadc_isr, IRQF_TRIGGER_RISING,
"qpnp_vadc_interrupt", vadc);
if (rc) {
@@ -751,7 +784,7 @@
"failed to request adc irq with error %d\n", rc);
return rc;
} else {
- enable_irq_wake(vadc->adc->adc_irq);
+ enable_irq_wake(vadc->adc->adc_irq_eoc);
}
qpnp_vadc = vadc;
diff --git a/drivers/input/misc/lis3dh_acc.c b/drivers/input/misc/lis3dh_acc.c
index af96d3f..cc4ee9f 100644
--- a/drivers/input/misc/lis3dh_acc.c
+++ b/drivers/input/misc/lis3dh_acc.c
@@ -1086,26 +1086,26 @@
static struct device_attribute attributes[] = {
- __ATTR(pollrate_ms, 0666, attr_get_polling_rate,
+ __ATTR(pollrate_ms, 0664, attr_get_polling_rate,
attr_set_polling_rate),
- __ATTR(range, 0666, attr_get_range, attr_set_range),
- __ATTR(enable, 0666, attr_get_enable, attr_set_enable),
- __ATTR(int1_config, 0666, attr_get_intconfig1, attr_set_intconfig1),
- __ATTR(int1_duration, 0666, attr_get_duration1, attr_set_duration1),
- __ATTR(int1_threshold, 0666, attr_get_thresh1, attr_set_thresh1),
+ __ATTR(range, 0664, attr_get_range, attr_set_range),
+ __ATTR(enable, 0664, attr_get_enable, attr_set_enable),
+ __ATTR(int1_config, 0664, attr_get_intconfig1, attr_set_intconfig1),
+ __ATTR(int1_duration, 0664, attr_get_duration1, attr_set_duration1),
+ __ATTR(int1_threshold, 0664, attr_get_thresh1, attr_set_thresh1),
__ATTR(int1_source, 0444, attr_get_source1, NULL),
- __ATTR(click_config, 0666, attr_get_click_cfg, attr_set_click_cfg),
+ __ATTR(click_config, 0664, attr_get_click_cfg, attr_set_click_cfg),
__ATTR(click_source, 0444, attr_get_click_source, NULL),
- __ATTR(click_threshold, 0666, attr_get_click_ths, attr_set_click_ths),
- __ATTR(click_timelimit, 0666, attr_get_click_tlim,
+ __ATTR(click_threshold, 0664, attr_get_click_ths, attr_set_click_ths),
+ __ATTR(click_timelimit, 0664, attr_get_click_tlim,
attr_set_click_tlim),
- __ATTR(click_timelatency, 0666, attr_get_click_tlat,
+ __ATTR(click_timelatency, 0664, attr_get_click_tlat,
attr_set_click_tlat),
- __ATTR(click_timewindow, 0666, attr_get_click_tw, attr_set_click_tw),
+ __ATTR(click_timewindow, 0664, attr_get_click_tw, attr_set_click_tw),
#ifdef DEBUG
- __ATTR(reg_value, 0666, attr_reg_get, attr_reg_set),
- __ATTR(reg_addr, 0222, NULL, attr_addr_set),
+ __ATTR(reg_value, 0664, attr_reg_get, attr_reg_set),
+ __ATTR(reg_addr, 0220, NULL, attr_addr_set),
#endif
};
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 04a7598..db6f93c 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -288,7 +288,7 @@
static struct device_attribute attributes[] = {
- __ATTR(pollrate_ms, 0666,
+ __ATTR(pollrate_ms, 0664,
mpu3050_attr_get_polling_rate,
mpu3050_attr_set_polling_rate),
};
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ec3429b..332138c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -25,6 +25,17 @@
If unsure, say N here.
+# MSM IOMMU CPU-GPU sync programming support
+config MSM_IOMMU_GPU_SYNC
+ bool "MSM IOMMU CPU-GPU Sync Support"
+ depends on (ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8930) && MSM_IOMMU && MSM_REMOTE_SPINLOCK_SFPB
+ help
+ Say Y here if you want to synchronize access to IOMMU configuration
+ port between CPU and GPU. CPU will grab a remote spinlock before
+ accessing IOMMU configuration registers and GPU will do the same.
+
+ If unsure, say N here.
+
config IOMMU_PGTABLES_L2
bool "Allow SMMU page tables in the L2 cache (Experimental)"
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f8c9809..4c72df7 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>
+#include <mach/msm_smsm.h>
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
@@ -66,6 +67,69 @@
DEFINE_MUTEX(msm_iommu_lock);
+/**
+ * Remote spinlock implementation based on Peterson's algorithm to be used
+ * to synchronize IOMMU config port access between CPU and GPU.
+ * This implements Process 0 of the spin lock algorithm. GPU implements
+ * Process 1. Flag and turn is stored in shared memory to allow GPU to
+ * access these.
+ */
+struct msm_iommu_remote_lock {
+ int initialized;
+ struct remote_iommu_petersons_spinlock *lock;
+};
+
+static struct msm_iommu_remote_lock msm_iommu_remote_lock;
+
+#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
+static void _msm_iommu_remote_spin_lock_init(void)
+{
+ msm_iommu_remote_lock.lock = smem_alloc(SMEM_SPINLOCK_ARRAY, 32);
+ memset(msm_iommu_remote_lock.lock, 0,
+ sizeof(*msm_iommu_remote_lock.lock));
+}
+
+void msm_iommu_remote_p0_spin_lock(void)
+{
+ msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
+ msm_iommu_remote_lock.lock->turn = 1;
+
+ smp_mb();
+
+ while (msm_iommu_remote_lock.lock->flag[PROC_GPU] == 1 &&
+ msm_iommu_remote_lock.lock->turn == 1)
+ cpu_relax();
+}
+
+void msm_iommu_remote_p0_spin_unlock(void)
+{
+ smp_mb();
+
+ msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
+}
+#endif
+
+inline void msm_iommu_mutex_lock(void)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+inline void msm_iommu_mutex_unlock(void)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+void *msm_iommu_lock_initialize(void)
+{
+ mutex_lock(&msm_iommu_lock);
+ if (!msm_iommu_remote_lock.initialized) {
+ msm_iommu_remote_lock_init();
+ msm_iommu_remote_lock.initialized = 1;
+ }
+ mutex_unlock(&msm_iommu_lock);
+ return msm_iommu_remote_lock.lock;
+}
+
struct msm_priv {
unsigned long *pgtable;
int redirect;
@@ -116,12 +180,17 @@
if (ret)
goto fail;
+ msm_iommu_remote_spin_lock();
+
asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
ctx_drvdata->num);
SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
asid | (va & TLBIVA_VA));
mb();
+
+ msm_iommu_remote_spin_unlock();
+
__disable_clocks(iommu_drvdata);
}
fail:
@@ -148,11 +217,16 @@
if (ret)
goto fail;
+ msm_iommu_remote_spin_lock();
+
asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
ctx_drvdata->num);
SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
mb();
+
+ msm_iommu_remote_spin_unlock();
+
__disable_clocks(iommu_drvdata);
}
fail:
@@ -189,6 +263,9 @@
{
unsigned int prrr, nmrr;
int i, j, found;
+
+ msm_iommu_remote_spin_lock();
+
__reset_context(base, ctx);
/* Set up HTW mode */
@@ -278,6 +355,8 @@
/* Enable the MMU */
SET_M(base, ctx, 1);
mb();
+
+ msm_iommu_remote_spin_unlock();
}
static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
@@ -417,10 +496,15 @@
if (ret)
goto fail;
+ msm_iommu_remote_spin_lock();
+
SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));
__reset_context(iommu_drvdata->base, ctx_dev->num);
+
+ msm_iommu_remote_spin_unlock();
+
__disable_clocks(iommu_drvdata);
list_del_init(&ctx_drvdata->attached_elm);
ctx_drvdata->attached_domain = NULL;
@@ -1083,6 +1167,8 @@
if (ret)
goto fail;
+ msm_iommu_remote_spin_lock();
+
SET_V2PPR(base, ctx, va & V2Pxx_VA);
mb();
@@ -1097,6 +1183,8 @@
if (GET_FAULT(base, ctx))
ret = 0;
+ msm_iommu_remote_spin_unlock();
+
__disable_clocks(iommu_drvdata);
fail:
mutex_unlock(&msm_iommu_lock);
@@ -1157,6 +1245,8 @@
if (ret)
goto fail;
+ msm_iommu_remote_spin_lock();
+
fsr = GET_FSR(base, num);
if (fsr) {
@@ -1188,6 +1278,8 @@
} else
ret = IRQ_NONE;
+ msm_iommu_remote_spin_unlock();
+
__disable_clocks(drvdata);
fail:
mutex_unlock(&msm_iommu_lock);
@@ -1258,6 +1350,8 @@
if (!msm_soc_version_supports_iommu_v1())
return -ENODEV;
+ msm_iommu_lock_initialize();
+
setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 255920e..a641ce9 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -46,8 +46,8 @@
/* wled control registers */
#define WLED_MOD_CTRL_REG SSBI_REG_ADDR_WLED_CTRL(1)
#define WLED_MAX_CURR_CFG_REG(n) SSBI_REG_ADDR_WLED_CTRL(n + 2)
-#define WLED_BRIGHTNESS_CNTL_REG1(n) SSBI_REG_ADDR_WLED_CTRL(n + 5)
-#define WLED_BRIGHTNESS_CNTL_REG2(n) SSBI_REG_ADDR_WLED_CTRL(n + 6)
+#define WLED_BRIGHTNESS_CNTL_REG1(n) SSBI_REG_ADDR_WLED_CTRL((2 * n) + 5)
+#define WLED_BRIGHTNESS_CNTL_REG2(n) SSBI_REG_ADDR_WLED_CTRL((2 * n) + 6)
#define WLED_SYNC_REG SSBI_REG_ADDR_WLED_CTRL(11)
#define WLED_OVP_CFG_REG SSBI_REG_ADDR_WLED_CTRL(13)
#define WLED_BOOST_CFG_REG SSBI_REG_ADDR_WLED_CTRL(14)
@@ -640,7 +640,7 @@
/* program activation delay and maximum current */
for (i = 0; i < num_wled_strings; i++) {
rc = pm8xxx_readb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i + 2), &val);
+ WLED_MAX_CURR_CFG_REG(i), &val);
if (rc) {
dev_err(led->dev->parent, "can't read wled max current"
" config register rc=%d\n", rc);
@@ -665,7 +665,7 @@
val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
rc = pm8xxx_writeb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i + 2), val);
+ WLED_MAX_CURR_CFG_REG(i), val);
if (rc) {
dev_err(led->dev->parent, "can't write wled max current"
" config register rc=%d\n", rc);
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index b7ace53..f802a38 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -205,6 +205,8 @@
dmx_ts_data_ready_cb callback);
int (*notify_data_read)(struct dmx_ts_feed *feed,
u32 bytes_num);
+ int (*set_tsp_out_format) (struct dmx_ts_feed *feed,
+ enum dmx_tsp_format_t tsp_format);
};
/*--------------------------------------------------------------------------*/
@@ -369,9 +371,6 @@
int (*set_tsp_format) (struct dmx_demux *demux,
enum dmx_tsp_format_t tsp_format);
- int (*set_tsp_out_format) (struct dmx_demux *demux,
- enum dmx_tsp_format_t tsp_format);
-
int (*set_playback_mode) (struct dmx_demux *demux,
enum dmx_playback_mode_t mode,
dmx_ts_fullness ts_fullness_callback,
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 71642a5..507c014 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -465,6 +465,90 @@
return NULL;
}
+static int dvr_input_thread_entry(void *arg)
+{
+ struct dmxdev *dmxdev = arg;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+ int ret;
+ size_t todo;
+ size_t split;
+
+ while (1) {
+ /* wait for input */
+ ret = wait_event_interruptible(src->queue,
+ (!src->data) ||
+ (dvb_ringbuffer_avail(src)) ||
+ (src->error != 0) ||
+ (dmxdev->dvr_in_exit) ||
+ kthread_should_stop());
+
+ if ((ret < 0) || kthread_should_stop())
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (!src->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ break;
+ }
+
+ if (src->error) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ wake_up_all(&src->queue);
+ break;
+ }
+
+ dmxdev->dvr_processing_input = 1;
+
+ ret = dvb_ringbuffer_avail(src);
+ todo = ret;
+
+ split = (src->pread + ret > src->size) ?
+ src->size - src->pread :
+ 0;
+
+ /*
+ * In DVR PULL mode, write might block.
+ * Lock on DVR buffer is released before calling to
+ * write, if DVR was released meanwhile, dvr_in_exit is
+	 * prompted. Lock is acquired when updating the read pointer
+	 * again to preserve read/write pointers consistency
+ */
+ if (split > 0) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ dmxdev->demux->write(dmxdev->demux,
+ src->data + src->pread,
+ split);
+
+ if (dmxdev->dvr_in_exit)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ todo -= split;
+ DVB_RINGBUFFER_SKIP(src, split);
+ }
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+ dmxdev->demux->write(dmxdev->demux,
+ src->data + src->pread, todo);
+
+ if (dmxdev->dvr_in_exit)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ DVB_RINGBUFFER_SKIP(src, todo);
+ dmxdev->dvr_processing_input = 0;
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&src->queue);
+ }
+
+ return 0;
+}
+
+
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
@@ -542,6 +626,17 @@
dmxdev->demux->dvr_input.priv_handle = NULL;
dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
dvbdev->writers--;
+
+ dmxdev->dvr_input_thread =
+ kthread_run(
+ dvr_input_thread_entry,
+ (void *)dmxdev,
+ "dvr_input");
+
+ if (IS_ERR(dmxdev->dvr_input_thread)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
}
dvbdev->users++;
@@ -601,11 +696,11 @@
dmxdev->demux->write_cancel(dmxdev->demux);
/*
- * Now flush dvr-in workqueue so that no one
+ * Now stop dvr-input thread so that no one
* would process data from dvr input buffer any more
* before it gets freed.
*/
- flush_workqueue(dmxdev->dvr_input_workqueue);
+ kthread_stop(dmxdev->dvr_input_thread);
dvbdev->writers++;
dmxdev->demux->disconnect_frontend(dmxdev->demux);
@@ -773,12 +868,7 @@
buf += ret;
mutex_unlock(&dmxdev->mutex);
-
wake_up_all(&src->queue);
-
- if (!work_pending(&dmxdev->dvr_input_work))
- queue_work(dmxdev->dvr_input_workqueue,
- &dmxdev->dvr_input_work);
}
return (count - todo) ? (count - todo) : ret;
@@ -827,87 +917,6 @@
return res;
}
-static void dvr_input_work_func(struct work_struct *worker)
-{
- struct dmxdev *dmxdev =
- container_of(worker, struct dmxdev, dvr_input_work);
- struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
- int ret;
- size_t todo;
- size_t split;
-
- while (1) {
- /* wait for input */
- ret = wait_event_interruptible(src->queue,
- (!src->data) ||
- (dvb_ringbuffer_avail(src)) ||
- (src->error != 0) ||
- (dmxdev->dvr_in_exit));
-
- if (ret < 0)
- break;
-
- spin_lock(&dmxdev->dvr_in_lock);
-
- if (!src->data || dmxdev->exit || dmxdev->dvr_in_exit) {
- spin_unlock(&dmxdev->dvr_in_lock);
- break;
- }
-
- if (src->error) {
- spin_unlock(&dmxdev->dvr_in_lock);
- wake_up_all(&src->queue);
- break;
- }
-
- dmxdev->dvr_processing_input = 1;
-
- ret = dvb_ringbuffer_avail(src);
- todo = ret;
-
- split = (src->pread + ret > src->size) ?
- src->size - src->pread :
- 0;
-
- /*
- * In DVR PULL mode, write might block.
- * Lock on DVR buffer is released before calling to
- * write, if DVR was released meanwhile, dvr_in_exit is
- * prompted. Lock is aquired when updating the read pointer
- * again to preserve read/write pointers consistancy
- */
- if (split > 0) {
- spin_unlock(&dmxdev->dvr_in_lock);
- dmxdev->demux->write(dmxdev->demux,
- src->data + src->pread,
- split);
-
- if (dmxdev->dvr_in_exit)
- break;
-
- spin_lock(&dmxdev->dvr_in_lock);
-
- todo -= split;
- DVB_RINGBUFFER_SKIP(src, split);
- }
-
- spin_unlock(&dmxdev->dvr_in_lock);
- dmxdev->demux->write(dmxdev->demux,
- src->data + src->pread, todo);
-
- if (dmxdev->dvr_in_exit)
- break;
-
- spin_lock(&dmxdev->dvr_in_lock);
-
- DVB_RINGBUFFER_SKIP(src, todo);
- dmxdev->dvr_processing_input = 0;
- spin_unlock(&dmxdev->dvr_in_lock);
-
- wake_up_all(&src->queue);
- }
-}
-
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
unsigned int f_flags,
unsigned long size)
@@ -1205,10 +1214,6 @@
wake_up_all(&buffer->queue);
- if (!work_pending(&dmxdev->dvr_input_work))
- queue_work(dmxdev->dvr_input_workqueue,
- &dmxdev->dvr_input_work);
-
return 0;
}
@@ -1331,6 +1336,21 @@
return 0;
}
+static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_tsp_format_t dmx_tsp_format)
+{
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
+ (dmx_tsp_format < DMX_TSP_FORMAT_188))
+ return -EINVAL;
+
+ dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
+
+ return 0;
+}
+
static int dvb_dmxdev_set_pes_buffer_size(struct dmxdev_filter *dmxdevfilter,
unsigned long size)
{
@@ -2255,6 +2275,9 @@
return ret;
}
+ if (tsfeed->set_tsp_out_format)
+ tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
+
/* Support indexing for video PES */
if ((para->pes_type == DMX_PES_VIDEO0) ||
(para->pes_type == DMX_PES_VIDEO1) ||
@@ -2465,6 +2488,8 @@
dmxdevfilter->pes_buffer_size = 32768;
+ dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
+
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
@@ -2854,19 +2879,15 @@
break;
case DMX_SET_TS_OUT_FORMAT:
- if (!dmxdev->demux->set_tsp_out_format) {
- ret = -EINVAL;
- break;
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
}
- if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
- ret = -EBUSY;
- break;
- }
-
- ret = dmxdev->demux->set_tsp_out_format(
- dmxdev->demux,
+ ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
*(enum dmx_tsp_format_t *)parg);
+
+ mutex_unlock(&dmxdevfilter->mutex);
break;
case DMX_SET_DECODER_BUFFER_SIZE:
@@ -3244,14 +3265,6 @@
if (!dmxdev->filter)
return -ENOMEM;
- dmxdev->dvr_input_workqueue =
- create_singlethread_workqueue("dvr_workqueue");
-
- if (dmxdev->dvr_input_workqueue == NULL) {
- vfree(dmxdev->filter);
- return -ENOMEM;
- }
-
dmxdev->playback_mode = DMX_PB_MODE_PUSH;
mutex_init(&dmxdev->mutex);
@@ -3272,9 +3285,6 @@
dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
- INIT_WORK(&dmxdev->dvr_input_work,
- dvr_input_work_func);
-
if (dmxdev->demux->debugfs_demux_dir)
debugfs_create_file("filters", S_IRUGO,
dmxdev->demux->debugfs_demux_dir, dmxdev,
@@ -3297,9 +3307,6 @@
dmxdev->dvr_dvbdev->users==1);
}
- flush_workqueue(dmxdev->dvr_input_workqueue);
- destroy_workqueue(dmxdev->dvr_input_workqueue);
-
dvb_unregister_device(dmxdev->dvbdev);
dvb_unregister_device(dmxdev->dvr_dvbdev);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index e30c2c3..d1c1cc3 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -34,7 +34,7 @@
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/dvb/dmx.h>
#include "dvbdev.h"
@@ -122,6 +122,8 @@
/* relevent for decoder PES */
unsigned long pes_buffer_size;
+ /* for recording output */
+ enum dmx_tsp_format_t dmx_tsp_format;
u32 rec_chunk_size;
/* only for sections */
@@ -131,8 +133,6 @@
};
struct dmxdev {
- struct work_struct dvr_input_work;
-
struct dvb_device *dvbdev;
struct dvb_device *dvr_dvbdev;
@@ -165,7 +165,7 @@
struct dvb_ringbuffer dvr_input_buffer;
enum dmx_buffer_mode dvr_input_buffer_mode;
- struct workqueue_struct *dvr_input_workqueue;
+ struct task_struct *dvr_input_thread;
#define DVR_BUFFER_SIZE (10*188*1024)
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 2c5294f..6d66d45 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -42,6 +42,8 @@
*/
// #define DVB_DEMUX_SECTION_LOSS_LOG
+#define TIMESTAMP_LEN 4
+
static int dvb_demux_tscheck;
module_param(dvb_demux_tscheck, int, 0644);
MODULE_PARM_DESC(dvb_demux_tscheck,
@@ -417,17 +419,16 @@
static inline void dvb_dmx_swfilter_output_packet(
struct dvb_demux_feed *feed,
- const u8 *buf)
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
{
- u8 time_stamp[4] = {0};
- struct dvb_demux *demux = feed->demux;
-
/*
* if we output 192 packet with timestamp at head of packet,
* output the timestamp now before the 188 TS packet
*/
- if (demux->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
- feed->cb.ts(time_stamp, 4, NULL, 0, &feed->feed.ts, DMX_OK);
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts, DMX_OK);
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
@@ -435,8 +436,9 @@
* if we output 192 packet with timestamp at tail of packet,
* output the timestamp now after the 188 TS packet
*/
- if (demux->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
- feed->cb.ts(time_stamp, 4, NULL, 0, &feed->feed.ts, DMX_OK);
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts, DMX_OK);
}
static inline void dvb_dmx_configure_decoder_fullness(
@@ -571,7 +573,7 @@
}
static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
- const u8 *buf)
+ const u8 *buf, const u8 timestamp[TIMESTAMP_LEN])
{
switch (feed->type) {
case DMX_TYPE_TS:
@@ -581,7 +583,8 @@
if (feed->ts_type & TS_PAYLOAD_ONLY)
dvb_dmx_swfilter_payload(feed, buf);
else
- dvb_dmx_swfilter_output_packet(feed, buf);
+ dvb_dmx_swfilter_output_packet(feed,
+ buf, timestamp);
}
if (feed->ts_type & TS_DECODER)
if (feed->demux->write_to_decoder)
@@ -605,7 +608,8 @@
((f)->feed.ts.is_filtering) && \
(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
-static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
+static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
{
struct dvb_demux_feed *feed;
u16 pid = ts_pid(buf);
@@ -678,10 +682,10 @@
continue;
if (feed->pid == pid)
- dvb_dmx_swfilter_packet_type(feed, buf);
+ dvb_dmx_swfilter_packet_type(feed, buf, timestamp);
else if ((feed->pid == 0x2000) &&
(feed->feed.ts.is_filtering))
- dvb_dmx_swfilter_output_packet(feed, buf);
+ dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
}
}
@@ -735,6 +739,7 @@
size_t count)
{
struct timespec pre_time;
+ u8 timestamp[TIMESTAMP_LEN] = {0};
if (dvb_demux_performancecheck)
pre_time = current_kernel_time();
@@ -746,7 +751,7 @@
while (count--) {
if (buf[0] == 0x47)
- dvb_dmx_swfilter_packet(demux, buf);
+ dvb_dmx_swfilter_packet(demux, buf, timestamp);
buf += 188;
}
@@ -794,6 +799,7 @@
int p = 0, i, j;
const u8 *q;
struct timespec pre_time;
+ u8 timestamp[TIMESTAMP_LEN];
if (dvb_demux_performancecheck)
pre_time = current_kernel_time();
@@ -812,12 +818,23 @@
goto bailout;
}
memcpy(&demux->tsbuf[i], buf, j);
+
+ if (pktsize == 192) {
+ if (leadingbytes)
+ memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+ else
+ memcpy(timestamp, &buf[188], TIMESTAMP_LEN);
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
+
if (pktsize == 192 &&
leadingbytes &&
demux->tsbuf[leadingbytes] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux, demux->tsbuf+4);
+ dvb_dmx_swfilter_packet(demux,
+ demux->tsbuf + TIMESTAMP_LEN, timestamp);
else if (demux->tsbuf[0] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux, demux->tsbuf);
+ dvb_dmx_swfilter_packet(demux, demux->tsbuf, timestamp);
demux->tsbufp = 0;
p += j;
}
@@ -841,10 +858,18 @@
q = demux->tsbuf;
}
- if (pktsize == 192 && leadingbytes)
- q = &buf[p+leadingbytes];
+ if (pktsize == 192) {
+ if (leadingbytes) {
+ q = &buf[p+leadingbytes];
+ memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+ } else {
+ memcpy(timestamp, &buf[188], TIMESTAMP_LEN);
+ }
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
- dvb_dmx_swfilter_packet(demux, q);
+ dvb_dmx_swfilter_packet(demux, q, timestamp);
p += pktsize;
}
@@ -891,7 +916,7 @@
break;
case DMX_TSP_FORMAT_192_HEAD:
- _dvb_dmx_swfilter(demux, buf, count, 192, 4);
+ _dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN);
break;
case DMX_TSP_FORMAT_204:
@@ -1154,6 +1179,25 @@
return 0;
}
+static int dmx_ts_set_tsp_out_format(
+ struct dmx_ts_feed *ts_feed,
+ enum dmx_tsp_format_t tsp_format)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ feed->tsp_out_format = tsp_format;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
struct dmx_ts_feed **ts_feed,
dmx_ts_cb callback)
@@ -1175,6 +1219,7 @@
feed->pid = 0xffff;
feed->peslen = 0;
feed->buffer = NULL;
+ feed->tsp_out_format = DMX_TSP_FORMAT_188;
memset(&feed->indexing_params, 0,
sizeof(struct dmx_indexing_video_params));
@@ -1193,6 +1238,7 @@
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
(*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
+ (*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
(*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
(*ts_feed)->notify_data_read = NULL;
@@ -1689,23 +1735,6 @@
return 0;
}
-static int dvbdmx_set_tsp_out_format(
- struct dmx_demux *demux,
- enum dmx_tsp_format_t tsp_format)
-{
- struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
-
- if ((tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
- (tsp_format < DMX_TSP_FORMAT_188))
- return -EINVAL;
-
- mutex_lock(&dvbdemux->mutex);
-
- dvbdemux->tsp_out_format = tsp_format;
- mutex_unlock(&dvbdemux->mutex);
- return 0;
-}
-
int dvb_dmx_init(struct dvb_demux *dvbdemux)
{
int i;
@@ -1776,7 +1805,6 @@
dvbdemux->tsbufp = 0;
dvbdemux->tsp_format = DMX_TSP_FORMAT_188;
- dvbdemux->tsp_out_format = DMX_TSP_FORMAT_188;
if (!dvbdemux->check_crc32)
dvbdemux->check_crc32 = dvb_dmx_crc32;
@@ -1806,7 +1834,6 @@
dmx->get_pes_pids = dvbdmx_get_pes_pids;
dmx->set_tsp_format = dvbdmx_set_tsp_format;
- dmx->set_tsp_out_format = dvbdmx_set_tsp_out_format;
mutex_init(&dvbdemux->mutex);
spin_lock_init(&dvbdemux->lock);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 706cd0c..4e6dfaf 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -90,6 +90,7 @@
u16 pid;
u8 *buffer;
int buffer_size;
+ enum dmx_tsp_format_t tsp_out_format;
struct timespec timeout;
struct dvb_demux_filter *filter;
@@ -155,7 +156,6 @@
uint32_t speed_pkts_cnt; /* for TS speed check */
enum dmx_tsp_format_t tsp_format;
- enum dmx_tsp_format_t tsp_out_format;
enum dmx_playback_mode_t playback_mode;
int sw_filter_abort;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
index 3500eda..e9987c2 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
@@ -30,7 +30,7 @@
/**
* TSIF alias name length
*/
-#define TSIF_NAME_LENGTH 10
+#define TSIF_NAME_LENGTH 20
#define MPQ_MAX_FOUND_PATTERNS 5
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index 2e783f6..bbf9d0a 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tsif_api.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include "mpq_dvb_debug.h"
#include "mpq_dmx_plugin_common.h"
@@ -38,17 +38,8 @@
static int tsif_mode = DMX_TSIF_DRIVER_MODE_DEF;
static int clock_inv;
module_param(threshold, int, S_IRUGO);
-module_param(tsif_mode, int, S_IRUGO);
-module_param(clock_inv, int, S_IRUGO);
-
-/*
- * Work scheduled each time TSIF notifies dmx
- * of new TS packet
- */
-struct tsif_work {
- struct work_struct work;
- int tsif_id;
-};
+module_param(tsif_mode, int, S_IRUGO | S_IWUSR);
+module_param(clock_inv, int, S_IRUGO | S_IWUSR);
/*
@@ -78,11 +69,12 @@
{
/* Information for each TSIF input processing */
struct {
- /* work used to submit to workqueue for processing */
- struct tsif_work work;
+ /* thread processing TS packets from TSIF */
+ struct task_struct *thread;
+ wait_queue_head_t wait_queue;
- /* workqueue that processes TS packets from specific TSIF */
- struct workqueue_struct *workqueue;
+ /* Counter for data notifications from TSIF */
+ atomic_t data_cnt;
/* TSIF alias */
char name[TSIF_NAME_LENGTH];
@@ -103,94 +95,72 @@
/**
- * Worker function that processes the TS packets notified by the TSIF driver.
+ * Demux thread function handling data from specific TSIF.
*
- * @worker: the executed work
+ * @arg: TSIF number
*/
-static void mpq_dmx_tsif_work(struct work_struct *worker)
+static int mpq_dmx_tsif_thread(void *arg)
{
- struct tsif_work *tsif_work =
- container_of(worker, struct tsif_work, work);
struct mpq_demux *mpq_demux;
struct tsif_driver_info *tsif_driver;
size_t packets = 0;
- int tsif = tsif_work->tsif_id;
+ int tsif = (int)arg;
+ int ret;
- mpq_demux = mpq_dmx_tsif_info.tsif[tsif].mpq_demux;
- tsif_driver = &(mpq_dmx_tsif_info.tsif[tsif].tsif_driver);
+ do {
+ ret = wait_event_interruptible(
+ mpq_dmx_tsif_info.tsif[tsif].wait_queue,
+ (atomic_read(
+ &mpq_dmx_tsif_info.tsif[tsif].data_cnt) != 0) ||
+ kthread_should_stop());
- MPQ_DVB_DBG_PRINT(
- "%s executed, tsif = %d\n",
- __func__,
- tsif);
+ if ((ret < 0) || kthread_should_stop()) {
+ MPQ_DVB_DBG_PRINT("%s: exit\n", __func__);
+ break;
+ }
- if (mutex_lock_interruptible(&mpq_dmx_tsif_info.tsif[tsif].mutex))
- return;
+ if (mutex_lock_interruptible(
+ &mpq_dmx_tsif_info.tsif[tsif].mutex))
+ return -ERESTARTSYS;
- /* Check if driver handler is still valid */
- if (tsif_driver->tsif_handler == NULL) {
- mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
- MPQ_DVB_ERR_PRINT("%s: tsif_driver->tsif_handler is NULL!\n",
+ tsif_driver = &(mpq_dmx_tsif_info.tsif[tsif].tsif_driver);
+ mpq_demux = mpq_dmx_tsif_info.tsif[tsif].mpq_demux;
+
+ /* Check if driver handler is still valid */
+ if (tsif_driver->tsif_handler == NULL) {
+ mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
+ MPQ_DVB_DBG_PRINT(
+ "%s: tsif was detached\n",
__func__);
- return;
- }
+ continue;
+ }
- tsif_get_state(tsif_driver->tsif_handler, &(tsif_driver->ri),
- &(tsif_driver->wi), &(tsif_driver->state));
+ tsif_get_state(
+ tsif_driver->tsif_handler, &(tsif_driver->ri),
+ &(tsif_driver->wi), &(tsif_driver->state));
- if ((tsif_driver->wi == tsif_driver->ri) ||
- (tsif_driver->state == tsif_state_stopped) ||
- (tsif_driver->state == tsif_state_error)) {
+ if ((tsif_driver->wi == tsif_driver->ri) ||
+ (tsif_driver->state == tsif_state_stopped) ||
+ (tsif_driver->state == tsif_state_error)) {
- mpq_demux->hw_notification_size = 0;
+ mpq_demux->hw_notification_size = 0;
- mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
+ mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
- MPQ_DVB_ERR_PRINT(
- "%s: invalid TSIF state (%d), wi = (%d), ri = (%d)\n",
- __func__,
- tsif_driver->state, tsif_driver->wi, tsif_driver->ri);
- return;
- }
+ MPQ_DVB_DBG_PRINT(
+ "%s: TSIF invalid state %d, %d, %d\n",
+ __func__,
+ tsif_driver->state,
+ tsif_driver->wi,
+ tsif_driver->ri);
+ continue;
+ }
- if (tsif_driver->wi > tsif_driver->ri) {
- packets = (tsif_driver->wi - tsif_driver->ri);
- mpq_demux->hw_notification_size = packets;
+ atomic_dec(&mpq_dmx_tsif_info.tsif[tsif].data_cnt);
- dvb_dmx_swfilter_format(
- &mpq_demux->demux,
- (tsif_driver->data_buffer +
- (tsif_driver->ri * TSIF_PKT_SIZE)),
- (packets * TSIF_PKT_SIZE),
- DMX_TSP_FORMAT_192_TAIL);
-
- tsif_driver->ri =
- (tsif_driver->ri + packets) % tsif_driver->buffer_size;
-
- tsif_reclaim_packets(tsif_driver->tsif_handler,
- tsif_driver->ri);
- } else {
- /*
- * wi < ri, means wraparound on cyclic buffer.
- * Handle in two stages.
- */
- packets = (tsif_driver->buffer_size - tsif_driver->ri);
- mpq_demux->hw_notification_size = packets;
-
- dvb_dmx_swfilter_format(
- &mpq_demux->demux,
- (tsif_driver->data_buffer +
- (tsif_driver->ri * TSIF_PKT_SIZE)),
- (packets * TSIF_PKT_SIZE),
- DMX_TSP_FORMAT_192_TAIL);
-
- /* tsif_driver->ri should be 0 after this */
- tsif_driver->ri =
- (tsif_driver->ri + packets) % tsif_driver->buffer_size;
-
- packets = tsif_driver->wi;
- if (packets > 0) {
- mpq_demux->hw_notification_size += packets;
+ if (tsif_driver->wi > tsif_driver->ri) {
+ packets = (tsif_driver->wi - tsif_driver->ri);
+ mpq_demux->hw_notification_size = packets;
dvb_dmx_swfilter_format(
&mpq_demux->demux,
@@ -202,13 +172,55 @@
tsif_driver->ri =
(tsif_driver->ri + packets) %
tsif_driver->buffer_size;
+
+ tsif_reclaim_packets(
+ tsif_driver->tsif_handler,
+ tsif_driver->ri);
+ } else {
+ /*
+ * wi < ri, means wraparound on cyclic buffer.
+ * Handle in two stages.
+ */
+ packets = (tsif_driver->buffer_size - tsif_driver->ri);
+ mpq_demux->hw_notification_size = packets;
+
+ dvb_dmx_swfilter_format(
+ &mpq_demux->demux,
+ (tsif_driver->data_buffer +
+ (tsif_driver->ri * TSIF_PKT_SIZE)),
+ (packets * TSIF_PKT_SIZE),
+ DMX_TSP_FORMAT_192_TAIL);
+
+ /* tsif_driver->ri should be 0 after this */
+ tsif_driver->ri =
+ (tsif_driver->ri + packets) %
+ tsif_driver->buffer_size;
+
+ packets = tsif_driver->wi;
+ if (packets > 0) {
+ mpq_demux->hw_notification_size += packets;
+
+ dvb_dmx_swfilter_format(
+ &mpq_demux->demux,
+ (tsif_driver->data_buffer +
+ (tsif_driver->ri * TSIF_PKT_SIZE)),
+ (packets * TSIF_PKT_SIZE),
+ DMX_TSP_FORMAT_192_TAIL);
+
+ tsif_driver->ri =
+ (tsif_driver->ri + packets) %
+ tsif_driver->buffer_size;
+ }
+
+ tsif_reclaim_packets(
+ tsif_driver->tsif_handler,
+ tsif_driver->ri);
}
- tsif_reclaim_packets(tsif_driver->tsif_handler,
- tsif_driver->ri);
- }
+ mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
+ } while (1);
- mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
+ return 0;
}
@@ -220,7 +232,6 @@
static void mpq_tsif_callback(void *user)
{
int tsif = (int)user;
- struct work_struct *work;
struct mpq_demux *mpq_demux;
MPQ_DVB_DBG_PRINT("%s executed, tsif = %d\n", __func__, tsif);
@@ -229,11 +240,8 @@
mpq_demux = mpq_dmx_tsif_info.tsif[tsif].mpq_demux;
mpq_dmx_update_hw_statistics(mpq_demux);
- work = &mpq_dmx_tsif_info.tsif[tsif].work.work;
-
- /* Scheudle a new work to demux workqueue */
- if (!work_pending(work))
- queue_work(mpq_dmx_tsif_info.tsif[tsif].workqueue, work);
+ atomic_inc(&mpq_dmx_tsif_info.tsif[tsif].data_cnt);
+ wake_up(&mpq_dmx_tsif_info.tsif[tsif].wait_queue);
}
@@ -376,20 +384,10 @@
tsif_driver = &(mpq_dmx_tsif_info.tsif[tsif].tsif_driver);
tsif_stop(tsif_driver->tsif_handler);
tsif_detach(tsif_driver->tsif_handler);
- /*
- * temporarily release mutex and flush the work queue
- * before setting tsif_handler to NULL
- */
- mutex_unlock(&mpq_dmx_tsif_info.tsif[tsif].mutex);
- flush_workqueue(mpq_dmx_tsif_info.tsif[tsif].workqueue);
- /* re-acquire mutex */
- if (mutex_lock_interruptible(
- &mpq_dmx_tsif_info.tsif[tsif].mutex))
- return -ERESTARTSYS;
-
tsif_driver->tsif_handler = NULL;
tsif_driver->data_buffer = NULL;
tsif_driver->buffer_size = 0;
+ atomic_set(&mpq_dmx_tsif_info.tsif[tsif].data_cnt, 0);
mpq_dmx_tsif_info.tsif[tsif].mpq_demux = NULL;
}
@@ -708,31 +706,28 @@
}
for (i = 0; i < TSIF_COUNT; i++) {
- mpq_dmx_tsif_info.tsif[i].work.tsif_id = i;
-
- INIT_WORK(&mpq_dmx_tsif_info.tsif[i].work.work,
- mpq_dmx_tsif_work);
-
snprintf(mpq_dmx_tsif_info.tsif[i].name,
TSIF_NAME_LENGTH,
- "tsif_%d",
+ "dmx_tsif%d",
i);
- mpq_dmx_tsif_info.tsif[i].workqueue =
- create_singlethread_workqueue(
+ atomic_set(&mpq_dmx_tsif_info.tsif[i].data_cnt, 0);
+ init_waitqueue_head(&mpq_dmx_tsif_info.tsif[i].wait_queue);
+ mpq_dmx_tsif_info.tsif[i].thread =
+ kthread_run(
+ mpq_dmx_tsif_thread, (void *)i,
mpq_dmx_tsif_info.tsif[i].name);
- if (mpq_dmx_tsif_info.tsif[i].workqueue == NULL) {
+ if (IS_ERR(mpq_dmx_tsif_info.tsif[i].thread)) {
int j;
for (j = 0; j < i; j++) {
- destroy_workqueue(
- mpq_dmx_tsif_info.tsif[j].workqueue);
+ kthread_stop(mpq_dmx_tsif_info.tsif[j].thread);
mutex_destroy(&mpq_dmx_tsif_info.tsif[j].mutex);
}
MPQ_DVB_ERR_PRINT(
- "%s: create_singlethread_workqueue failed\n",
+ "%s: kthread_run failed\n",
__func__);
return -ENOMEM;
@@ -753,7 +748,7 @@
ret);
for (i = 0; i < TSIF_COUNT; i++) {
- destroy_workqueue(mpq_dmx_tsif_info.tsif[i].workqueue);
+ kthread_stop(mpq_dmx_tsif_info.tsif[i].thread);
mutex_destroy(&mpq_dmx_tsif_info.tsif[i].mutex);
}
}
@@ -781,16 +776,13 @@
if (tsif_driver->tsif_handler)
tsif_stop(tsif_driver->tsif_handler);
}
+
/* Detach from TSIF driver to avoid further notifications. */
if (tsif_driver->tsif_handler)
tsif_detach(tsif_driver->tsif_handler);
- /* release mutex to allow work queue to finish scheduled work */
mutex_unlock(&mpq_dmx_tsif_info.tsif[i].mutex);
- /* flush the work queue and destroy it */
- flush_workqueue(mpq_dmx_tsif_info.tsif[i].workqueue);
- destroy_workqueue(mpq_dmx_tsif_info.tsif[i].workqueue);
-
+ kthread_stop(mpq_dmx_tsif_info.tsif[i].thread);
mutex_destroy(&mpq_dmx_tsif_info.tsif[i].mutex);
}
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index 360d96a..f5c01e1 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -12,12 +12,11 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <mach/msm_tspp.h>
#include "mpq_dvb_debug.h"
#include "mpq_dmx_plugin_common.h"
-
#define TSIF_COUNT 2
#define TSPP_MAX_PID_FILTER_NUM 16
@@ -28,6 +27,7 @@
/* For each TSIF we allocate two pipes, one for PES and one for sections */
#define TSPP_PES_CHANNEL 0
#define TSPP_SECTION_CHANNEL 1
+#define TSPP_CHANNEL_COUNT 2
/* the channel_id set to TSPP driver based on TSIF number and channel type */
#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch)
@@ -84,18 +84,10 @@
static int clock_inv;
static int tsif_mode = 2;
static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
-module_param(tsif_mode, int, S_IRUGO);
-module_param(clock_inv, int, S_IRUGO);
+module_param(tsif_mode, int, S_IRUGO | S_IWUSR);
+module_param(clock_inv, int, S_IRUGO | S_IWUSR);
module_param(allocation_mode, int, S_IRUGO);
-/*
- * Work scheduled each time TSPP notifies dmx
- * of new TS packet in some channel
- */
-struct tspp_work {
- struct work_struct work;
- int channel_id;
-};
/* The following structure hold singelton information
* required for dmx implementation on top of TSPP.
@@ -111,8 +103,8 @@
*/
int pes_channel_ref;
- /* work used to submit to workqueue to process pes channel */
- struct tspp_work pes_work;
+ /* Counter for data notifications on PES pipe */
+ atomic_t pes_data_cnt;
/* ION handle used for TSPP data buffer allocation */
struct ion_handle *pes_mem_heap_handle;
@@ -130,8 +122,8 @@
*/
int section_channel_ref;
- /* work used to submit to workqueue to process pes channel */
- struct tspp_work section_work;
+ /* Counter for data notifications on section pipe */
+ atomic_t section_data_cnt;
/* ION handle used for TSPP data buffer allocation */
struct ion_handle *section_mem_heap_handle;
@@ -151,8 +143,9 @@
int ref_count;
} filters[TSPP_MAX_PID_FILTER_NUM];
- /* workqueue that processes TS packets from specific TSIF */
- struct workqueue_struct *workqueue;
+ /* thread processing TS packets from TSPP */
+ struct task_struct *thread;
+ wait_queue_head_t wait_queue;
/* TSIF alias */
char name[TSIF_NAME_LENGTH];
@@ -274,55 +267,93 @@
}
/**
- * Worker function that processes the TS packets notified by TSPP.
+ * Demux thread function handling data from specific TSIF.
*
- * @worker: the executed work
+ * @arg: TSIF number
*/
-static void mpq_dmx_tspp_work(struct work_struct *worker)
+static int mpq_dmx_tspp_thread(void *arg)
{
- struct tspp_work *tspp_work =
- container_of(worker, struct tspp_work, work);
+ int tsif = (int)arg;
struct mpq_demux *mpq_demux;
- int channel_id = tspp_work->channel_id;
- int tsif = TSPP_GET_TSIF_NUM(channel_id);
const struct tspp_data_descriptor *tspp_data_desc;
+ atomic_t *data_cnt;
int ref_count;
+ int ret;
+ int i;
- mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ do {
+ ret = wait_event_interruptible(
+ mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+ (atomic_read(
+ &mpq_dmx_tspp_info.tsif[tsif].pes_data_cnt)) ||
+ (atomic_read(
+ &mpq_dmx_tspp_info.tsif[tsif].section_data_cnt)) ||
+ kthread_should_stop());
- /* Lock against the TSPP filters data-structure */
- if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex))
- return;
+ if ((ret < 0) || kthread_should_stop()) {
+ MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+ break;
+ }
- /* Make sure channel is still active */
- if (TSPP_IS_PES_CHANNEL(channel_id))
- ref_count = mpq_dmx_tspp_info.tsif[tsif].pes_channel_ref;
- else
- ref_count = mpq_dmx_tspp_info.tsif[tsif].section_channel_ref;
+ /* Lock against the TSPP filters data-structure */
+ if (mutex_lock_interruptible(
+ &mpq_dmx_tspp_info.tsif[tsif].mutex))
+ return -ERESTARTSYS;
- if (ref_count == 0) {
+ for (i = 0; i < TSPP_CHANNEL_COUNT; i++) {
+ int channel_id = TSPP_CHANNEL_ID(tsif, i);
+
+ if (TSPP_IS_PES_CHANNEL(channel_id)) {
+ ref_count =
+ mpq_dmx_tspp_info.tsif[tsif].pes_channel_ref;
+ data_cnt =
+ &mpq_dmx_tspp_info.tsif[tsif].pes_data_cnt;
+ } else {
+ ref_count =
+ mpq_dmx_tspp_info.tsif[tsif].
+ section_channel_ref;
+ data_cnt =
+ &mpq_dmx_tspp_info.tsif[tsif].section_data_cnt;
+ }
+
+ /* Make sure channel is still active */
+ if (ref_count == 0)
+ continue;
+
+ atomic_dec(data_cnt);
+
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_demux->hw_notification_size = 0;
+
+ /*
+ * Go through all filled descriptors
+ * and perform demuxing on them
+ */
+ while ((tspp_data_desc =
+ tspp_get_buffer(0, channel_id)) != NULL) {
+ mpq_demux->hw_notification_size +=
+ (tspp_data_desc->size /
+ TSPP_RAW_TTS_SIZE);
+
+ dvb_dmx_swfilter_format(
+ &mpq_demux->demux,
+ tspp_data_desc->virt_base,
+ tspp_data_desc->size,
+ DMX_TSP_FORMAT_192_TAIL);
+
+ /*
+ * Notify TSPP that the buffer
+ * is no longer needed
+ */
+ tspp_release_buffer(0,
+ channel_id, tspp_data_desc->id);
+ }
+ }
+
mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
- return;
- }
+ } while (1);
- mpq_demux->hw_notification_size = 0;
-
- /* Go through all filled descriptors and perform demuxing on them */
- while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
- mpq_demux->hw_notification_size +=
- (tspp_data_desc->size / TSPP_RAW_TTS_SIZE);
-
- dvb_dmx_swfilter_format(
- &mpq_demux->demux,
- tspp_data_desc->virt_base,
- tspp_data_desc->size,
- DMX_TSP_FORMAT_192_TAIL);
-
- /* Notify TSPP that the buffer is no longer needed */
- tspp_release_buffer(0, channel_id, tspp_data_desc->id);
- }
-
- mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ return 0;
}
/**
@@ -334,7 +365,6 @@
static void mpq_tspp_callback(int channel_id, void *user)
{
int tsif = (int)user;
- struct work_struct *work;
struct mpq_demux *mpq_demux;
/* Save statistics on TSPP notifications */
@@ -342,13 +372,11 @@
mpq_dmx_update_hw_statistics(mpq_demux);
if (TSPP_IS_PES_CHANNEL(channel_id))
- work = &mpq_dmx_tspp_info.tsif[tsif].pes_work.work;
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].pes_data_cnt);
else
- work = &mpq_dmx_tspp_info.tsif[tsif].section_work.work;
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].section_data_cnt);
- /* Scheudle a new work to demux workqueue */
- if (!work_pending(work))
- queue_work(mpq_dmx_tspp_info.tsif[tsif].workqueue, work);
+ wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
}
/**
@@ -586,6 +614,7 @@
int tsif;
int ret;
int channel_id;
+ atomic_t *data_cnt;
int *channel_ref_count;
struct tspp_filter tspp_filter;
struct mpq_demux *mpq_demux = feed->demux->priv;
@@ -613,10 +642,12 @@
channel_id = TSPP_CHANNEL_ID(tsif, TSPP_PES_CHANNEL);
channel_ref_count =
&mpq_dmx_tspp_info.tsif[tsif].pes_channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].pes_data_cnt;
} else {
channel_id = TSPP_CHANNEL_ID(tsif, TSPP_SECTION_CHANNEL);
channel_ref_count =
&mpq_dmx_tspp_info.tsif[tsif].section_channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].section_data_cnt;
}
/* check if required TSPP pipe is already allocated or not */
@@ -677,6 +708,7 @@
tspp_unregister_notification(0, channel_id);
tspp_close_channel(0, channel_id);
tspp_close_stream(0, channel_id);
+ atomic_set(data_cnt, 0);
}
mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
@@ -1079,24 +1111,14 @@
mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle = NULL;
mpq_dmx_tspp_info.tsif[i].pes_mem_heap_virt_base = NULL;
mpq_dmx_tspp_info.tsif[i].pes_mem_heap_phys_base = 0;
-
- mpq_dmx_tspp_info.tsif[i].pes_work.channel_id =
- TSPP_CHANNEL_ID(i, TSPP_PES_CHANNEL);
-
- INIT_WORK(&mpq_dmx_tspp_info.tsif[i].pes_work.work,
- mpq_dmx_tspp_work);
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].pes_data_cnt, 0);
mpq_dmx_tspp_info.tsif[i].section_channel_ref = 0;
mpq_dmx_tspp_info.tsif[i].section_index = 0;
mpq_dmx_tspp_info.tsif[i].section_mem_heap_handle = NULL;
mpq_dmx_tspp_info.tsif[i].section_mem_heap_virt_base = NULL;
mpq_dmx_tspp_info.tsif[i].section_mem_heap_phys_base = 0;
-
- mpq_dmx_tspp_info.tsif[i].section_work.channel_id =
- TSPP_CHANNEL_ID(i, TSPP_SECTION_CHANNEL);
-
- INIT_WORK(&mpq_dmx_tspp_info.tsif[i].section_work.work,
- mpq_dmx_tspp_work);
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].section_data_cnt, 0);
for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
@@ -1105,22 +1127,23 @@
snprintf(mpq_dmx_tspp_info.tsif[i].name,
TSIF_NAME_LENGTH,
- "tsif_%d",
+ "dmx_tsif%d",
i);
- mpq_dmx_tspp_info.tsif[i].workqueue =
- create_singlethread_workqueue(
+ init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+ mpq_dmx_tspp_info.tsif[i].thread =
+ kthread_run(
+ mpq_dmx_tspp_thread, (void *)i,
mpq_dmx_tspp_info.tsif[i].name);
- if (mpq_dmx_tspp_info.tsif[i].workqueue == NULL) {
+ if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
for (j = 0; j < i; j++) {
- destroy_workqueue(
- mpq_dmx_tspp_info.tsif[j].workqueue);
-
+ kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
}
+
MPQ_DVB_ERR_PRINT(
- "%s: create_singlethread_workqueue failed\n",
+ "%s: kthread_run failed\n",
__func__);
return -ENOMEM;
@@ -1138,7 +1161,7 @@
ret);
for (i = 0; i < TSIF_COUNT; i++) {
- destroy_workqueue(mpq_dmx_tspp_info.tsif[i].workqueue);
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
}
}
@@ -1179,8 +1202,7 @@
mpq_dmx_tsif_ion_cleanup(i);
mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
- flush_workqueue(mpq_dmx_tspp_info.tsif[i].workqueue);
- destroy_workqueue(mpq_dmx_tspp_info.tsif[i].workqueue);
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
}
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index fde7cb7..363b541 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -2535,7 +2535,6 @@
case SCAN_FOR_WEAK:
radio->srch_st_list.srch_list_dir = dir;
radio->srch_st_list.srch_list_mode = srch;
- radio->srch_st_list.srch_list_max = 0;
retval = hci_fm_search_station_list(
&radio->srch_st_list, radio->fm_hdev);
break;
@@ -3170,6 +3169,7 @@
radio->srch_rds.srch_pi = ctrl->value;
break;
case V4L2_CID_PRIVATE_IRIS_SRCH_CNT:
+ radio->srch_st_list.srch_list_max = ctrl->value;
break;
case V4L2_CID_PRIVATE_IRIS_SPACING:
if (radio->mode == FM_RECV) {
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 49543a4..f61b74f 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -473,16 +473,15 @@
kfree(ctrlcmd);
free_qcmd(rcmd);
D("%s: rc %d\n", __func__, rc);
- /* rc is the time elapsed. */
- if (rc >= 0) {
- /* TODO: Refactor msm_ctrl_cmd::status field */
- if (out->status == 0)
- rc = -1;
- else if (out->status == 1 || out->status == 4)
- rc = 0;
- else
- rc = -EINVAL;
- }
+	/* rc is the time elapsed.
+	 * This means that the communication with the daemon itself was
+	 * successful (irrespective of the handling of the ctrlcmd).
+	 * So, just reset rc to 0 to indicate success.
+	 * It's up to the caller to parse the ctrlcmd to check the status; we
+	 * don't need to parse it here. */
+ if (rc >= 0)
+ rc = 0;
+
return rc;
ctrlcmd_alloc_fail:
@@ -846,9 +845,9 @@
rc = -EINVAL;
goto end;
}
-
+ tmp_cmd.status = cmd_ptr->status = ctrlcmd.status;
if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
- (void *)&tmp_cmd, cmd_len)) {
+ (void *)cmd_ptr, cmd_len)) {
pr_err("%s: copy_to_user failed in cpy, size=%d\n",
__func__, cmd_len);
rc = -EINVAL;
@@ -1645,6 +1644,7 @@
static const struct v4l2_ioctl_ops msm_ioctl_ops_server = {
.vidioc_subscribe_event = msm_server_v4l2_subscribe_event,
+ .vidioc_unsubscribe_event = msm_server_v4l2_unsubscribe_event,
.vidioc_default = msm_ioctl_server,
};
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 5e39d25..387c254 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -17,7 +17,7 @@
#include <linux/proc_fs.h>
#include <linux/ioctl.h>
#include <mach/camera.h>
-#include "msm.h"
+#include "../msm.h"
uint32_t msm_cam_server_get_mctl_handle(void);
struct iommu_domain *msm_cam_server_get_domain(void);
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index c4bfaf4..c281f9c 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -47,6 +47,27 @@
"Decode Order",
NULL
};
+static const char *const mpeg_video_vidc_extradata[] = {
+ "Extradata none",
+ "Extradata MB Quantization",
+ "Extradata Interlace Video",
+ "Extradata VC1 Framedisp",
+ "Extradata VC1 Seqdisp",
+ "Extradata timestamp",
+ "Extradata S3D Frame Packing",
+ "Extradata Frame Rate",
+ "Extradata Panscan Window",
+ "Extradata Recovery point SEI",
+ "Extradata Closed Caption UD",
+ "Extradata AFD UD",
+ "Extradata Multislice info",
+ "Extradata number of concealed MB",
+ "Extradata metadata filler",
+ "Extradata input crop",
+ "Extradata digital zoom",
+ "Extradata aspect ratio",
+};
+
static const struct msm_vidc_ctrl msm_vdec_ctrls[] = {
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT,
@@ -168,6 +189,36 @@
.menu_skip_mask = 0,
.qmenu = NULL,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+ .name = "Extradata Type",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+ .default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
+ ),
+ .qmenu = mpeg_video_vidc_extradata,
+ .step = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
@@ -580,10 +631,7 @@
}
inst->prop.fps = (u8) (USEC_PER_SEC / us_per_frame);
if (inst->prop.fps) {
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
}
exit:
return rc;
@@ -830,11 +878,7 @@
"Failed to set persist buffers: %d\n", rc);
goto fail_start;
}
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks. Performance might be impacted\n");
- }
-
+ msm_comm_scale_clocks_and_bus(inst);
rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
if (rc) {
dprintk(VIDC_ERR,
@@ -925,10 +969,7 @@
rc = -EINVAL;
break;
}
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks. Power might be impacted\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
if (rc)
dprintk(VIDC_ERR,
@@ -1087,6 +1128,15 @@
inst->mode = VIDC_SECURE;
dprintk(VIDC_DBG, "Setting secure mode to :%d\n", inst->mode);
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+ {
+ struct hal_extradata_enable extra;
+ property_id = HAL_PARAM_INDEX_EXTRADATA;
+ extra.index = msm_comm_get_hal_extradata_index(control.value);
+ extra.enable = 1;
+ pdata = &extra;
+ break;
+ }
default:
break;
}
@@ -1111,6 +1161,7 @@
failed_open_done:
return rc;
}
+
static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
return 0;
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index d01841d..f4c973f 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -639,10 +639,7 @@
dprintk(VIDC_ERR, "Failed to set persist buffers: %d\n", rc);
goto fail_start;
}
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks. Performance might be impacted\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
if (rc) {
@@ -718,10 +715,7 @@
rc = -EINVAL;
break;
}
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks. Power might be impacted\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
if (rc)
dprintk(VIDC_ERR,
@@ -1371,10 +1365,7 @@
dprintk(VIDC_WARN,
"Failed to set frame rate %d\n", rc);
}
- if (msm_comm_scale_clocks(inst->core, inst->session_type)) {
- dprintk(VIDC_WARN,
- "Failed to scale clocks\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
}
exit:
return rc;
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index dff3272..87f53ac 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -64,7 +64,6 @@
};
static const u32 bus_table[] = {
- 0,
36000,
110400,
244800,
@@ -77,12 +76,11 @@
{
int num_rows = sizeof(bus_table)/(sizeof(u32));
int i;
- if (!load)
- return 0;
for (i = 0; i < num_rows; i++) {
if (load <= bus_table[i])
break;
}
+ i++;
dprintk(VIDC_DBG, "Required bus = %d\n", i);
return i;
}
@@ -122,37 +120,60 @@
break;
ret = table[i].freq;
}
- dprintk(VIDC_INFO, "Required clock rate = %lu\n", ret);
+ dprintk(VIDC_DBG, "Required clock rate = %lu\n", ret);
return ret;
}
-int msm_comm_scale_bus(struct msm_vidc_core *core, enum session_type type)
+static int msm_comm_scale_bus(struct msm_vidc_core *core,
+ enum session_type type, enum mem_type mtype)
{
int load;
int rc = 0;
+ u32 handle = 0;
if (!core || type >= MSM_VIDC_MAX_DEVICES) {
dprintk(VIDC_ERR, "Invalid args: %p, %d\n", core, type);
return -EINVAL;
}
load = msm_comm_get_load(core, type);
- rc = msm_bus_scale_client_update_request(
- core->resources.bus_info.ddr_handle[type],
- get_bus_vector(load));
- if (rc) {
- dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
- goto fail_scale_bus;
+ if (mtype & DDR_MEM)
+ handle = core->resources.bus_info.ddr_handle[type];
+ if (mtype & OCMEM_MEM)
+ handle = core->resources.bus_info.ocmem_handle[type];
+ if (handle) {
+ rc = msm_bus_scale_client_update_request(
+ handle, get_bus_vector(load));
+ if (rc)
+ dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
+ } else {
+ dprintk(VIDC_ERR, "Failed to scale bus, mtype: %d\n",
+ mtype);
+ rc = -EINVAL;
}
- rc = msm_bus_scale_client_update_request(
- core->resources.bus_info.ocmem_handle[type],
- get_bus_vector(load));
- if (rc) {
- dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
- goto fail_scale_bus;
- }
-fail_scale_bus:
return rc;
}
+static void msm_comm_unvote_buses(struct msm_vidc_core *core,
+ enum mem_type mtype)
+{
+ int i;
+ for (i = 0; i < MSM_VIDC_MAX_DEVICES; i++) {
+ if ((mtype & DDR_MEM) &&
+ msm_bus_scale_client_update_request(
+ core->resources.bus_info.ddr_handle[i],
+ 0)) {
+ dprintk(VIDC_WARN,
+ "Failed to unvote for DDR accesses\n");
+ }
+ if ((mtype & OCMEM_MEM) &&
+ msm_bus_scale_client_update_request(
+ core->resources.bus_info.ocmem_handle[i],
+ 0)) {
+ dprintk(VIDC_WARN,
+ "Failed to unvote for OCMEM accesses\n");
+ }
+ }
+}
+
static int protect_cp_mem(struct msm_vidc_core *core)
{
struct tzbsp_memprot memprot;
@@ -589,33 +610,28 @@
struct v4l2_event dqevent;
unsigned long flags;
if (response) {
- inst = (struct msm_vidc_inst *)response->session_id;
- dprintk(VIDC_WARN,
- "Sys error received for session %p\n", inst);
- if (inst) {
- core = inst->core;
- if (core) {
- spin_lock_irqsave(&core->lock, flags);
- core->state = VIDC_CORE_INVALID;
- spin_unlock_irqrestore(&core->lock, flags);
- dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
- dqevent.id = 0;
- list_for_each_entry(inst, &core->instances,
+ core = get_vidc_core(response->device_id);
+ dprintk(VIDC_WARN, "SYS_ERROR received for core %p\n", core);
+ if (core) {
+ spin_lock_irqsave(&core->lock, flags);
+ core->state = VIDC_CORE_INVALID;
+ spin_unlock_irqrestore(&core->lock, flags);
+ dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+ dqevent.id = 0;
+ list_for_each_entry(inst, &core->instances,
list) {
- if (inst) {
- v4l2_event_queue_fh(
- &inst->event_handler,
- &dqevent);
- spin_lock_irqsave(&inst->lock,
- flags);
- inst->state =
- MSM_VIDC_CORE_INVALID;
- spin_unlock_irqrestore(
- &inst->lock, flags);
- }
- }
- wake_up(&inst->kernel_event_queue);
+ v4l2_event_queue_fh(&inst->event_handler,
+ &dqevent);
+
+ spin_lock_irqsave(&inst->lock, flags);
+ inst->state = MSM_VIDC_CORE_INVALID;
+ spin_unlock_irqrestore(&inst->lock, flags);
+
+ wake_up(&inst->kernel_event_queue);
}
+ } else {
+ dprintk(VIDC_ERR,
+ "Got SYS_ERR but unable to identify core");
}
} else {
dprintk(VIDC_ERR,
@@ -751,6 +767,10 @@
default:
break;
}
+ inst->count.fbd++;
+ if (fill_buf_done->filled_len1)
+ msm_vidc_debugfs_update(inst,
+ MSM_VIDC_DEBUGFS_EVENT_FBD);
dprintk(VIDC_DBG, "Filled length = %d; flags %x\n",
vb->v4l2_planes[0].bytesused,
@@ -759,7 +779,6 @@
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
wake_up(&inst->kernel_event_queue);
- msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
} else {
/*
* FIXME:
@@ -879,7 +898,7 @@
}
}
-int msm_comm_scale_clocks(struct msm_vidc_core *core, enum session_type type)
+static int msm_comm_scale_clocks(struct msm_vidc_core *core)
{
int num_mbs_per_sec;
int rc = 0;
@@ -893,14 +912,8 @@
rc = clk_set_rate(core->resources.clock[VCODEC_CLK].clk,
get_clock_rate(&core->resources.clock[VCODEC_CLK],
num_mbs_per_sec));
- if (rc) {
- dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
- goto fail_clk_set_rate;
- }
- rc = msm_comm_scale_bus(core, type);
if (rc)
- dprintk(VIDC_ERR, "Failed to scale bus bandwidth\n");
-fail_clk_set_rate:
+ dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
return rc;
}
@@ -946,6 +959,28 @@
}
}
+void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core = inst ? inst->core : NULL; /* don't deref before NULL check */
+	if (!inst) {
+		dprintk(VIDC_WARN, "Invalid params\n");
+		return;
+	}
+	if (msm_comm_scale_clocks(core)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale clocks. Performance might be impacted\n");
+	}
+	if (msm_comm_scale_bus(core, inst->session_type, DDR_MEM)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale DDR bus. Performance might be impacted\n");
+	}
+	if (core && core->resources.ocmem.buf) { /* guard core before member access */
+		if (msm_comm_scale_bus(core, inst->session_type, OCMEM_MEM))
+			dprintk(VIDC_WARN,
+				"Failed to scale OCMEM bus. Performance might be impacted\n");
+	}
+}
+
static int msm_comm_load_fw(struct msm_vidc_core *core)
{
int rc = 0;
@@ -953,25 +988,28 @@
dprintk(VIDC_ERR, "Invalid paramter: %p\n", core);
return -EINVAL;
}
-
if (!core->resources.fw.cookie)
core->resources.fw.cookie = subsystem_get("venus");
if (IS_ERR_OR_NULL(core->resources.fw.cookie)) {
dprintk(VIDC_ERR, "Failed to download firmware\n");
rc = -ENOMEM;
- goto fail_subsystem_get;
+ goto fail_load_fw;
}
+	/* Clocks can be enabled only after pil_get, since
+	 * the gdsc is turned on in pil_get. */
rc = msm_comm_enable_clks(core);
if (rc) {
dprintk(VIDC_ERR, "Failed to enable clocks: %d\n", rc);
goto fail_enable_clks;
}
+
rc = protect_cp_mem(core);
if (rc) {
dprintk(VIDC_ERR, "Failed to protect memory\n");
goto fail_iommu_attach;
}
+
rc = msm_comm_iommu_attach(core);
if (rc) {
dprintk(VIDC_ERR, "Failed to attach iommu");
@@ -983,7 +1021,7 @@
fail_enable_clks:
subsystem_put(core->resources.fw.cookie);
core->resources.fw.cookie = NULL;
-fail_subsystem_get:
+fail_load_fw:
return rc;
}
@@ -1186,16 +1224,23 @@
core->id, core->state);
goto core_already_inited;
}
- rc = msm_comm_scale_clocks(core, inst->session_type);
- if (rc) {
- dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
- goto fail_load_fw;
- }
+
rc = msm_comm_load_fw(core);
if (rc) {
dprintk(VIDC_ERR, "Failed to load video firmware\n");
goto fail_load_fw;
}
+ rc = msm_comm_scale_clocks(core);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to scale clocks: %d\n", rc);
+ goto fail_core_init;
+ }
+
+ rc = msm_comm_scale_bus(core, inst->session_type, DDR_MEM);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to scale DDR bus: %d\n", rc);
+ goto fail_core_init;
+ }
init_completion(&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)]);
rc = vidc_hal_core_init(core->device,
core->resources.io_map[NS_MAP].domain);
@@ -1212,6 +1257,7 @@
return rc;
fail_core_init:
msm_comm_unload_fw(core);
+ msm_comm_unvote_buses(core, DDR_MEM);
fail_load_fw:
mutex_unlock(&core->sync_lock);
return rc;
@@ -1228,10 +1274,7 @@
core->id, core->state);
goto core_already_uninited;
}
- if (msm_comm_scale_clocks(core, inst->session_type)) {
- dprintk(VIDC_WARN, "Failed to scale clocks while closing\n");
- dprintk(VIDC_INFO, "Power might be impacted\n");
- }
+ msm_comm_scale_clocks_and_bus(inst);
if (list_empty(&core->instances)) {
msm_comm_unset_ocmem(core);
msm_comm_free_ocmem(core);
@@ -1246,6 +1289,7 @@
core->state = VIDC_CORE_UNINIT;
spin_unlock_irqrestore(&core->lock, flags);
msm_comm_unload_fw(core);
+ msm_comm_unvote_buses(core, DDR_MEM|OCMEM_MEM);
}
core_already_uninited:
change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
@@ -1364,11 +1408,18 @@
goto exit;
}
ocmem_sz = get_ocmem_requirement(inst->prop.height, inst->prop.width);
- rc = msm_comm_alloc_ocmem(inst->core, ocmem_sz);
- if (rc)
- dprintk(VIDC_WARN,
+ rc = msm_comm_scale_bus(inst->core, inst->session_type, OCMEM_MEM);
+ if (!rc) {
+ rc = msm_comm_alloc_ocmem(inst->core, ocmem_sz);
+ if (rc) {
+ dprintk(VIDC_WARN,
"Failed to allocate OCMEM. Performance will be impacted\n");
-
+ msm_comm_unvote_buses(inst->core, OCMEM_MEM);
+ }
+ } else {
+ dprintk(VIDC_WARN,
+ "Failed to vote for OCMEM BW. Performance will be impacted\n");
+ }
rc = vidc_hal_session_load_res((void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -1472,6 +1523,25 @@
return rc;
}
+static int get_flipped_state(int present_state,
+ int desired_state)
+{
+ int flipped_state = present_state;
+ if (flipped_state < MSM_VIDC_STOP
+ && desired_state > MSM_VIDC_STOP) {
+ flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
+ flipped_state &= 0xFFFE;
+ flipped_state = flipped_state - 1;
+ } else if (flipped_state > MSM_VIDC_STOP
+ && desired_state < MSM_VIDC_STOP) {
+ flipped_state = MSM_VIDC_STOP -
+ (flipped_state - MSM_VIDC_STOP + 1);
+ flipped_state &= 0xFFFE;
+ flipped_state = flipped_state - 1;
+ }
+ return flipped_state;
+}
+
int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
{
int rc = 0;
@@ -1498,89 +1568,77 @@
"Core is in bad state can't change the state");
goto exit;
}
- flipped_state = inst->state;
- if (flipped_state < MSM_VIDC_STOP
- && state > MSM_VIDC_STOP) {
- flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
- flipped_state &= 0xFFFE;
- flipped_state = flipped_state - 1;
- } else if (flipped_state > MSM_VIDC_STOP
- && state < MSM_VIDC_STOP) {
- flipped_state = MSM_VIDC_STOP -
- (flipped_state - MSM_VIDC_STOP + 1);
- flipped_state &= 0xFFFE;
- flipped_state = flipped_state - 1;
- }
+ flipped_state = get_flipped_state(inst->state, state);
dprintk(VIDC_DBG,
"flipped_state = 0x%x\n", flipped_state);
switch (flipped_state) {
case MSM_VIDC_CORE_UNINIT_DONE:
case MSM_VIDC_CORE_INIT:
rc = msm_comm_init_core(inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_CORE_INIT_DONE:
rc = msm_comm_init_core_done(inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_OPEN:
rc = msm_comm_session_init(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_OPEN_DONE:
rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE,
SESSION_INIT_DONE);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_LOAD_RESOURCES:
rc = msm_vidc_load_resources(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_LOAD_RESOURCES_DONE:
case MSM_VIDC_START:
rc = msm_vidc_start(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_START_DONE:
rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE,
SESSION_START_DONE);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_STOP:
rc = msm_vidc_stop(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_STOP_DONE:
rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE,
SESSION_STOP_DONE);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
dprintk(VIDC_DBG, "Moving to Stop Done state\n");
case MSM_VIDC_RELEASE_RESOURCES:
rc = msm_vidc_release_res(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_RELEASE_RESOURCES_DONE:
rc = wait_for_state(inst, flipped_state,
MSM_VIDC_RELEASE_RESOURCES_DONE,
SESSION_RELEASE_RESOURCE_DONE);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
dprintk(VIDC_DBG,
"Moving to release resources done state\n");
case MSM_VIDC_CLOSE:
rc = msm_comm_session_close(flipped_state, inst);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_CLOSE_DONE:
rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE,
SESSION_END_DONE);
- if (rc || state <= inst->state)
+ if (rc || state <= get_flipped_state(inst->state, state))
break;
case MSM_VIDC_CORE_UNINIT:
dprintk(VIDC_DBG, "Sending core uninit\n");
rc = msm_vidc_deinit_core(inst);
- if (rc || state == inst->state)
+ if (rc || state == get_flipped_state(inst->state, state))
break;
default:
dprintk(VIDC_ERR, "State not recognized\n");
@@ -2131,3 +2189,61 @@
mutex_unlock(&inst->sync_lock);
return rc;
}
+
+
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+	enum v4l2_mpeg_vidc_extradata index)
+{
+	enum hal_extradata_id ret = HAL_EXTRADATA_NONE; /* return type matches decl */
+	switch (index) {
+	case V4L2_MPEG_VIDC_EXTRADATA_NONE:
+		ret = HAL_EXTRADATA_NONE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+		ret = HAL_EXTRADATA_MB_QUANTIZATION;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+		ret = HAL_EXTRADATA_INTERLACE_VIDEO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP:
+		ret = HAL_EXTRADATA_VC1_FRAMEDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP:
+		ret = HAL_EXTRADATA_VC1_SEQDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+		ret = HAL_EXTRADATA_TIMESTAMP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+		ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+		ret = HAL_EXTRADATA_FRAME_RATE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+		ret = HAL_EXTRADATA_PANSCAN_WINDOW;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD:
+		ret = HAL_EXTRADATA_CLOSED_CAPTION_UD;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_AFD_UD:
+		ret = HAL_EXTRADATA_AFD_UD;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+		ret = HAL_EXTRADATA_MULTISLICE_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+		ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+		ret = HAL_EXTRADATA_METADATA_FILLER;
+		break;
+	default:
+		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
+		break;
+	}
+	return ret;
+}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 7562058..916a3ca 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -32,7 +32,7 @@
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
int msm_comm_qbuf(struct vb2_buffer *vb);
-int msm_comm_scale_clocks(struct msm_vidc_core *core, enum session_type type);
+void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst);
int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
@@ -41,6 +41,8 @@
enum instance_state state);
int msm_comm_unset_ocmem(struct msm_vidc_core *core);
int msm_comm_free_ocmem(struct msm_vidc_core *core);
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+ enum v4l2_mpeg_vidc_extradata index);
#define IS_PRIV_CTRL(idx) (\
(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
V4L2_CTRL_DRIVER_PRIV(idx))
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.c b/drivers/media/video/msm_vidc/msm_vidc_debug.c
index 914c422..f91d0dd 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.c
@@ -202,28 +202,27 @@
switch (e) {
case MSM_VIDC_DEBUGFS_EVENT_ETB:
inst->count.etb++;
- if (inst->count.ftb > inst->count.fbd) {
+ if (inst->count.ebd && inst->count.ftb > inst->count.fbd) {
d->pdata[FRAME_PROCESSING].name[0] = '\0';
tic(inst, FRAME_PROCESSING, a);
}
break;
case MSM_VIDC_DEBUGFS_EVENT_EBD:
inst->count.ebd++;
- if (inst->count.ebd == inst->count.etb)
+ if (inst->count.ebd && inst->count.ebd == inst->count.etb)
toc(inst, FRAME_PROCESSING);
break;
case MSM_VIDC_DEBUGFS_EVENT_FTB: {
inst->count.ftb++;
- if (inst->count.etb > inst->count.ebd) {
+ if (inst->count.ebd && inst->count.etb > inst->count.ebd) {
d->pdata[FRAME_PROCESSING].name[0] = '\0';
tic(inst, FRAME_PROCESSING, a);
}
}
break;
case MSM_VIDC_DEBUGFS_EVENT_FBD:
- inst->count.fbd++;
- inst->debug.counter++;
- if (inst->count.fbd == inst->count.ftb)
+ inst->debug.samples++;
+ if (inst->count.ebd && inst->count.fbd == inst->count.ftb)
toc(inst, FRAME_PROCESSING);
break;
default:
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.h b/drivers/media/video/msm_vidc/msm_vidc_debug.h
index 1a51173..995daf0 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.h
@@ -81,23 +81,8 @@
do_gettimeofday(&__ddl_tv);
i->debug.pdata[p].stop = (__ddl_tv.tv_sec * 1000)
+ (__ddl_tv.tv_usec / 1000);
- i->debug.pdata[p].cumulative =
+ i->debug.pdata[p].cumulative +=
(i->debug.pdata[p].stop - i->debug.pdata[p].start);
- if (i->count.fbd) {
- if (i->debug.pdata[p].average != 0) {
- i->debug.pdata[p].average = ((i->debug.pdata[p].
- average * (i->count.fbd -
- i->debug.counter) +
- i->debug.pdata[p].cumulative)
- / i->count.fbd);
- } else {
- i->debug.pdata[p].average =
- i->debug.pdata[p].cumulative
- / i->count.fbd;
- }
- }
- i->debug.counter = 0;
- i->debug.pdata[p].cumulative = 0;
i->debug.pdata[p].sampling = true;
}
}
@@ -110,9 +95,11 @@
(msm_vidc_debug & VIDC_PROF)) {
dprintk(VIDC_PROF, "%s averaged %d ms/sample\n",
i->debug.pdata[x].name,
- i->debug.pdata[x].average);
+ i->debug.pdata[x].cumulative /
+ i->debug.samples);
dprintk(VIDC_PROF, "%s Samples: %d",
- i->debug.pdata[x].name, i->count.fbd);
+ i->debug.pdata[x].name,
+ i->debug.samples);
}
}
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index 5b2cced..b274d13 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -134,6 +134,11 @@
u32 freq;
};
+enum mem_type {
+ DDR_MEM = 0x1,
+ OCMEM_MEM = 0x2,
+};
+
struct core_clock {
char name[MAX_NAME_LENGTH];
struct clk *clk;
@@ -200,7 +205,7 @@
struct msm_vidc_debug {
struct profile_data pdata[MAX_PROFILING_POINTS];
int profile;
- int counter;
+ int samples;
};
struct msm_vidc_ssr_info {
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index f44be4d..e449821 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -972,6 +972,61 @@
}
return buffer;
}
+
+
+static int get_hfi_extradata_index(enum hal_extradata_id index)
+{
+ int ret = 0;
+ switch (index) {
+ case HAL_EXTRADATA_MB_QUANTIZATION:
+ ret = HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION;
+ break;
+ case HAL_EXTRADATA_INTERLACE_VIDEO:
+ ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VC1_FRAMEDISP:
+ ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VC1_SEQDISP:
+ ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_TIMESTAMP:
+ ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_S3D_FRAME_PACKING:
+ ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_FRAME_RATE:
+ ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_PANSCAN_WINDOW:
+ ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_RECOVERY_POINT_SEI:
+ ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_CLOSED_CAPTION_UD:
+ ret = HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_AFD_UD:
+ ret = HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_MULTISLICE_INFO:
+ ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
+ break;
+ case HAL_EXTRADATA_NUM_CONCEALED_MB:
+ ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
+ break;
+ case HAL_EXTRADATA_INDEX:
+ ret = HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG;
+ break;
+ default:
+ dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
+ break;
+ }
+ return ret;
+}
+
int vidc_hal_session_set_property(void *sess,
enum hal_property ptype, void *pdata)
{
@@ -979,6 +1034,7 @@
struct hfi_cmd_session_set_property_packet *pkt =
(struct hfi_cmd_session_set_property_packet *) &packet;
struct hal_session *session;
+ int rc = 0;
if (!sess || !pdata) {
dprintk(VIDC_ERR, "Invalid Params");
@@ -1528,6 +1584,30 @@
hfi_multi_slice_control);
break;
}
+ case HAL_PARAM_INDEX_EXTRADATA:
+ {
+ struct hfi_index_extradata_config *hfi;
+ struct hal_extradata_enable *extra = pdata;
+ int index = 0;
+ pkt->rg_property_data[0] =
+ get_hfi_extradata_index(extra->index);
+ hfi =
+ (struct hfi_index_extradata_config *)
+ &pkt->rg_property_data[1];
+ hfi->enable = extra->enable;
+ index = get_hfi_extradata_index(extra->index);
+ if (index)
+ hfi->index_extra_data_id = index;
+ else {
+ dprintk(VIDC_WARN,
+ "Failed to find extradata index: %d\n",
+ index);
+ rc = -EINVAL;
+ }
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_index_extradata_config);
+ break;
+ }
case HAL_CONFIG_VPE_DEINTERLACE:
break;
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
@@ -1562,9 +1642,10 @@
dprintk(VIDC_INFO, "DEFAULT: Calling 0x%x", ptype);
break;
}
- if (vidc_hal_iface_cmdq_write(session->device, pkt))
- return -ENOTEMPTY;
- return 0;
+ if (!rc)
+ rc = vidc_hal_iface_cmdq_write(session->device, pkt);
+
+ return rc;
}
int vidc_hal_session_get_property(void *sess,
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 8aff5af..9d20a31 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -72,12 +72,32 @@
VIDC_ERR_UNUSED = 0x10000000
};
+enum hal_extradata_id {
+ HAL_EXTRADATA_NONE,
+ HAL_EXTRADATA_MB_QUANTIZATION,
+ HAL_EXTRADATA_INTERLACE_VIDEO,
+ HAL_EXTRADATA_VC1_FRAMEDISP,
+ HAL_EXTRADATA_VC1_SEQDISP,
+ HAL_EXTRADATA_TIMESTAMP,
+ HAL_EXTRADATA_S3D_FRAME_PACKING,
+ HAL_EXTRADATA_FRAME_RATE,
+ HAL_EXTRADATA_PANSCAN_WINDOW,
+ HAL_EXTRADATA_RECOVERY_POINT_SEI,
+ HAL_EXTRADATA_CLOSED_CAPTION_UD,
+ HAL_EXTRADATA_AFD_UD,
+ HAL_EXTRADATA_MULTISLICE_INFO,
+ HAL_EXTRADATA_INDEX,
+ HAL_EXTRADATA_NUM_CONCEALED_MB,
+ HAL_EXTRADATA_METADATA_FILLER,
+};
+
enum hal_property {
HAL_CONFIG_FRAME_RATE = 0x04000001,
HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
+ HAL_PARAM_INDEX_EXTRADATA,
HAL_PARAM_FRAME_SIZE,
HAL_CONFIG_REALTIME,
HAL_PARAM_BUFFER_COUNT_ACTUAL,
@@ -460,6 +480,11 @@
HAL_UNUSED_PICT = 0x10000000,
};
+struct hal_extradata_enable {
+ u32 enable;
+ enum hal_extradata_id index;
+};
+
struct hal_enable_picture {
u32 picture_type;
};
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 200f5d3..8231bd4 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -154,11 +154,14 @@
cmd_done.device_id = device->device_id;
device->callback(SYS_ERROR, &cmd_done);
}
-static void hal_process_session_error(struct hal_device *device)
+static void hal_process_session_error(struct hal_device *device,
+ struct hfi_msg_event_notify_packet *pkt)
{
struct msm_vidc_cb_cmd_done cmd_done;
memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
cmd_done.device_id = device->device_id;
+ cmd_done.session_id = ((struct hal_session *) pkt->session_id)->
+ session_id;
device->callback(SESSION_ERROR, &cmd_done);
}
static void hal_process_event_notify(struct hal_device *device,
@@ -179,7 +182,7 @@
break;
case HFI_EVENT_SESSION_ERROR:
dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
- hal_process_session_error(device);
+ hal_process_session_error(device, pkt);
break;
case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED");
diff --git a/drivers/media/video/msm_wfd/enc-mfc-subdev.c b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
index d839be3..45532a9 100644
--- a/drivers/media/video/msm_wfd/enc-mfc-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
@@ -23,6 +23,8 @@
#define VID_ENC_MAX_ENCODER_CLIENTS 1
#define MAX_NUM_CTRLS 20
+#define V4L2_FRAME_FLAGS (V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | V4L2_QCOM_BUF_FLAG_CODECCONFIG)
static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg);
@@ -179,6 +181,7 @@
vbuf->v4l2_planes[0].bytesused =
frame_data->data_len;
+ vbuf->v4l2_buf.flags &= ~(V4L2_FRAME_FLAGS);
switch (frame_data->frame) {
case VCD_FRAME_I:
case VCD_FRAME_IDR:
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 82f9e58..5161b7b 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -396,6 +396,7 @@
if (rc == 0 && atomic_read(&dev->vp_enabled) == 1) {
/* This should not happen, if it does hw is stuck */
disable_irq_nosync(dev->vpirq->start);
+ atomic_set(&dev->vp_enabled, 0);
pr_err("%s: VP Timeout and VP still running\n",
__func__);
}
diff --git a/drivers/mfd/pm8821-core.c b/drivers/mfd/pm8821-core.c
index 86bd5ec..fe3e67e 100644
--- a/drivers/mfd/pm8821-core.c
+++ b/drivers/mfd/pm8821-core.c
@@ -312,11 +312,11 @@
drvdata = platform_get_drvdata(pdev);
if (drvdata)
pmic = drvdata->pm_chip_data;
- if (pmic)
- mfd_remove_devices(pmic->dev);
- if (pmic->irq_chip) {
- pm8821_irq_exit(pmic->irq_chip);
- pmic->irq_chip = NULL;
+ if (pmic) {
+ if (pmic->dev)
+ mfd_remove_devices(pmic->dev);
+ if (pmic->irq_chip)
+ pm8821_irq_exit(pmic->irq_chip);
}
platform_set_drvdata(pdev, NULL);
kfree(pmic);
diff --git a/drivers/mfd/pm8xxx-misc.c b/drivers/mfd/pm8xxx-misc.c
index 6bb1441..fb57bd0 100644
--- a/drivers/mfd/pm8xxx-misc.c
+++ b/drivers/mfd/pm8xxx-misc.c
@@ -671,8 +671,7 @@
voltage = chg_config->voltage;
resistor = chg_config->resistor;
- if (resistor < PM8XXX_COINCELL_RESISTOR_2100_OHMS ||
- resistor > PM8XXX_COINCELL_RESISTOR_800_OHMS) {
+ if (resistor > PM8XXX_COINCELL_RESISTOR_800_OHMS) {
pr_err("Invalid resistor value provided\n");
return -EINVAL;
}
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 1f7b67a..fa7c116 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -35,6 +35,8 @@
#define MAX_WCD9XXX_DEVICE 4
#define TABLA_I2C_MODE 0x03
#define SITAR_I2C_MODE 0x01
+#define CODEC_DT_MAX_PROP_SIZE 40
+#define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d"
struct wcd9xxx_i2c {
struct i2c_client *client;
@@ -43,6 +45,17 @@
int mod_id;
};
+static char *taiko_supplies[] = {
+ "cdc-vdd-buck", "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
+ "cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
+};
+
+static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
+ struct wcd9xxx_regulator *vreg, const char *vreg_name);
+static int wcd9xxx_dt_parse_micbias_info(struct device *dev,
+ struct wcd9xxx_micbias_setting *micbias);
+static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev);
+
struct wcd9xxx_i2c wcd9xxx_modules[MAX_WCD9XXX_DEVICE];
static int wcd9xxx_intf = -1;
@@ -764,19 +777,31 @@
int ret = 0;
int i2c_mode = 0;
static int device_id;
+ struct device *dev;
pr_info("%s\n", __func__);
if (wcd9xxx_intf == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- pr_info("tabla card is already detected in slimbus mode\n");
+ dev_dbg(&client->dev, "%s:Codec is detected in slimbus mode\n",
+ __func__);
return -ENODEV;
}
- pdata = client->dev.platform_data;
if (device_id > 0) {
wcd9xxx_modules[device_id++].client = client;
- pr_info("probe for other slaves devices of tabla\n");
+		dev_dbg(&client->dev, "%s: probe for other slave "
+			"devices of codec\n", __func__);
return ret;
}
-
+ dev = &client->dev;
+ if (client->dev.of_node) {
+ dev_dbg(&client->dev, "%s:Platform data from device tree\n",
+ __func__);
+ pdata = wcd9xxx_populate_dt_pdata(&client->dev);
+ client->dev.platform_data = pdata;
+ } else {
+ dev_dbg(&client->dev, "%s:Platform data from board file\n",
+ __func__);
+ pdata = client->dev.platform_data;
+ }
wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL);
if (wcd9xxx == NULL) {
pr_err("%s: error, allocation failed\n", __func__);
@@ -858,7 +883,6 @@
return 0;
}
-#define CODEC_DT_MAX_PROP_SIZE 40
static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
struct wcd9xxx_regulator *vreg, const char *vreg_name)
{
@@ -1057,11 +1081,6 @@
return 0;
}
-static char *taiko_supplies[] = {
- "cdc-vdd-buck", "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
- "cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
-};
-
static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev)
{
struct wcd9xxx_pdata *pdata;
@@ -1071,12 +1090,11 @@
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
- dev_err(dev,
- "could not allocate memory for platform data\n");
+ dev_err(dev, "could not allocate memory for platform data\n");
return NULL;
}
-
- if (!strcmp(dev_name(dev), "taiko-slim-pgd")) {
+ if (!strcmp(dev_name(dev), "taiko-slim-pgd") ||
+ (!strcmp(dev_name(dev), WCD9XXX_I2C_GSBI_SLAVE_ID))) {
codec_supplies = taiko_supplies;
num_of_supplies = ARRAY_SIZE(taiko_supplies);
} else {
@@ -1111,11 +1129,7 @@
pdata->reset_gpio);
goto err;
}
-
- ret = wcd9xxx_dt_parse_slim_interface_dev_info(dev,
- &pdata->slimbus_slave_device);
- if (ret)
- goto err;
+ dev_dbg(dev, "%s: reset gpio %d", __func__, pdata->reset_gpio);
return pdata;
err:
devm_kfree(dev, pdata);
@@ -1151,6 +1165,14 @@
if (slim->dev.of_node) {
dev_info(&slim->dev, "Platform data from device tree\n");
pdata = wcd9xxx_populate_dt_pdata(&slim->dev);
+ ret = wcd9xxx_dt_parse_slim_interface_dev_info(&slim->dev,
+ &pdata->slimbus_slave_device);
+ if (ret) {
+ dev_err(&slim->dev, "Error, parsing slim interface\n");
+ devm_kfree(&slim->dev, pdata);
+ ret = -EINVAL;
+ goto err;
+ }
slim->dev.platform_data = pdata;
} else {
@@ -1460,6 +1482,14 @@
#define WCD9XXX_I2C_DIGITAL_1 2
#define WCD9XXX_I2C_DIGITAL_2 3
+static struct i2c_device_id wcd9xxx_id_table[] = {
+ {"wcd9xxx-i2c", WCD9XXX_I2C_TOP_LEVEL},
+ {"wcd9xxx-i2c", WCD9XXX_I2C_ANALOG},
+ {"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_1},
+ {"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_2},
+ {}
+};
+
static struct i2c_device_id tabla_id_table[] = {
{"tabla top level", WCD9XXX_I2C_TOP_LEVEL},
{"tabla analog", WCD9XXX_I2C_ANALOG},
@@ -1481,9 +1511,22 @@
.suspend = wcd9xxx_i2c_suspend,
};
+static struct i2c_driver wcd9xxx_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "wcd9xxx-i2c-core",
+ },
+ .id_table = wcd9xxx_id_table,
+ .probe = wcd9xxx_i2c_probe,
+ .remove = __devexit_p(wcd9xxx_i2c_remove),
+ .resume = wcd9xxx_i2c_resume,
+ .suspend = wcd9xxx_i2c_suspend,
+};
+
+
static int __init wcd9xxx_init(void)
{
- int ret1, ret2, ret3, ret4, ret5, ret6;
+ int ret1, ret2, ret3, ret4, ret5, ret6, ret7;
ret1 = slim_driver_register(&tabla_slim_driver);
if (ret1 != 0)
@@ -1495,7 +1538,7 @@
ret3 = i2c_add_driver(&tabla_i2c_driver);
if (ret3 != 0)
- pr_err("failed to add the I2C driver\n");
+ pr_err("failed to add the tabla2x I2C driver\n");
ret4 = slim_driver_register(&sitar_slim_driver);
if (ret4 != 0)
@@ -1509,7 +1552,11 @@
if (ret6 != 0)
pr_err("Failed to register taiko SB driver: %d\n", ret6);
- return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6) ? -1 : 0;
+ ret7 = i2c_add_driver(&wcd9xxx_i2c_driver);
+ if (ret7 != 0)
+ pr_err("failed to add the wcd9xxx I2C driver\n");
+
+ return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6 && ret7) ? -1 : 0;
}
module_init(wcd9xxx_init);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 46015b0..3715417 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -49,6 +49,9 @@
#define QSEOS_VERSION_13 0x13
#define QSEOS_VERSION_14 0x14
#define QSEEE_VERSION_00 0x400000
+#define QSEE_VERSION_01 0x401000
+#define QSEE_VERSION_02 0x402000
+
#define QSEOS_CHECK_VERSION_CMD 0x00001803
@@ -75,6 +78,7 @@
QSEOS_GET_APP_STATE_COMMAND,
QSEOS_LOAD_SERV_IMAGE_COMMAND,
QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+ QSEOS_APP_REGION_NOTIFICATION,
QSEOS_CMD_MAX = 0xEFFFFFFF
};
@@ -89,6 +93,12 @@
CLK_SFPB,
};
+__packed struct qsee_apps_region_info_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t addr;
+ uint32_t size;
+};
+
__packed struct qseecom_check_app_ireq {
uint32_t qsee_cmd_id;
char app_name[MAX_APP_NAME_SIZE];
@@ -680,6 +690,7 @@
ion_phys_addr_t pa = 0;
uint32_t len;
struct qseecom_command_scm_resp resp;
+ struct qseecom_check_app_ireq req;
struct qseecom_load_app_ireq load_req;
/* Copy the relevant information needed for loading the image */
@@ -693,88 +704,112 @@
ret = qsee_vote_for_clock(CLK_SFPB);
if (ret)
pr_warning("Unable to vote for SFPB clock");
+ req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+ memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
- pr_warn("App (%s) does not exist, loading apps for first time\n",
+ ret = __qseecom_check_app_exists(req);
+ if (ret < 0)
+ return ret;
+ else
+ app_id = ret;
+
+ if (app_id) {
+ pr_warn("App id %d (%s) already exists\n", app_id,
+ (char *)(req.app_name));
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == app_id) {
+ entry->ref_cnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ } else {
+		pr_warn("App (%s) doesn't exist, loading apps for first time\n",
(char *)(load_img_req.img_name));
- /* Get the handle of the shared fd */
- ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ /* Get the handle of the shared fd */
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
- if (IS_ERR_OR_NULL(ihandle)) {
- pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
- /* Get the physical address of the ION BUF */
- ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
- /* Populate the structure for sending scm call to load image */
- memcpy(load_req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
- load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
- load_req.mdt_len = load_img_req.mdt_len;
- load_req.img_len = load_img_req.img_len;
- load_req.phy_addr = pa;
+ /* Populate the structure for sending scm call to load image */
+ memcpy(load_req.app_name, load_img_req.img_name,
+ MAX_APP_NAME_SIZE);
+ load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req.mdt_len = load_img_req.mdt_len;
+ load_req.img_len = load_img_req.img_len;
+ load_req.phy_addr = pa;
- /* SCM_CALL to load the app and get the app_id back */
- ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
+ /* SCM_CALL to load the app and get the app_id back */
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
sizeof(struct qseecom_load_app_ireq),
&resp, sizeof(resp));
- if (ret) {
- pr_err("scm_call to load app failed\n");
- return -EINVAL;
- }
-
- if (resp.result == QSEOS_RESULT_FAILURE) {
- pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
-
- if (resp.result == QSEOS_RESULT_INCOMPLETE) {
- ret = __qseecom_process_incomplete_cmd(data, &resp);
if (ret) {
- pr_err("process_incomplete_cmd failed err: %d\n",
- ret);
+ pr_err("scm_call to load app failed\n");
+ return -EINVAL;
+ }
+
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
qsee_disable_clock_vote(CLK_SFPB);
- return ret;
+ return -EFAULT;
}
- }
- if (resp.result != QSEOS_RESULT_SUCCESS) {
- pr_err("scm_call failed resp.result unknown, %d\n",
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return ret;
+ }
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("scm_call failed resp.result unknown, %d\n",
resp.result);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -EFAULT;
+ }
+
+ app_id = resp.data;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("kmalloc failed\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+
+ /* Deallocate the handle */
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
- app_id = resp.data;
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
- entry->app_id = app_id;
- entry->ref_cnt = 1;
-
- /* Deallocate the handle */
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
-
- spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
- list_add_tail(&entry->list, &qseecom.registered_app_list_head);
- spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
-
- pr_warn("App with id %d (%s) now loaded\n", app_id,
+ pr_warn("App with id %d (%s) now loaded\n", app_id,
(char *)(load_img_req.img_name));
-
+ }
data->client.app_id = app_id;
load_img_req.app_id = app_id;
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
@@ -1531,18 +1566,18 @@
if (ret)
return -EIO;
- *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
- if (!(*handle)) {
- pr_err("failed to allocate memory for kernel client handle\n");
- return -ENOMEM;
- }
-
app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
memcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(app_ireq);
if (ret < 0)
return -EINVAL;
+ *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+ if (!(*handle)) {
+ pr_err("failed to allocate memory for kernel client handle\n");
+ return -ENOMEM;
+ }
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
pr_err("kmalloc failed\n");
@@ -2558,6 +2593,32 @@
}
qseecom_platform_support = (struct msm_bus_scale_pdata *)
msm_bus_cl_get_pdata(pdev);
+ if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
+ struct resource *resource = NULL;
+ struct qsee_apps_region_info_ireq req;
+ struct qseecom_command_scm_resp resp;
+
+ resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "secapp-region");
+ if (resource) {
+ req.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
+ req.addr = resource->start;
+ req.size = resource_size(resource);
+ pr_warn("secure app region addr=0x%x size=0x%x",
+ req.addr, req.size);
+ } else {
+ pr_err("Fail to get secure app region info\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (rc) {
+ pr_err("Failed to send secapp region info %d\n",
+ rc);
+ goto err;
+ }
+ }
} else {
qseecom_platform_support = (struct msm_bus_scale_pdata *)
pdev->dev.platform_data;
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 3b678c5..f310524 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -327,10 +327,15 @@
int data_inverse;
int sync_inverse;
int enable_inverse;
+ u32 tsif_irq;
/* debugfs */
struct dentry *dent_tsif;
struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+ u32 stat_rx;
+ u32 stat_overflow;
+ u32 stat_lost_sync;
+ u32 stat_timeout;
};
enum tspp_buf_state {
@@ -480,6 +485,49 @@
dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework need to make sure all operations including clearing of
+	 * interrupt status registers in the hardware are performed.
+ * Thus a barrier after clearing the interrupt status register
+ * is required to guarantee that the interrupt status register has
+ * really been cleared by the time we return from this handler.
+ */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+ struct tspp_tsif_device *tsif_device = dev;
+ u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+ if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+ TSIF_STS_CTL_OVERFLOW |
+ TSIF_STS_CTL_LOST_SYNC |
+ TSIF_STS_CTL_TIMEOUT)))
+ return IRQ_NONE;
+
+ if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+ tsif_device->stat_overflow++;
+
+ if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+ tsif_device->stat_lost_sync++;
+
+ if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+ tsif_device->stat_timeout++;
+
+ iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework need to make sure all operations including clearing of
+	 * interrupt status registers in the hardware are performed.
+ * Thus a barrier after clearing the interrupt status register
+ * is required to guarantee that the interrupt status register has
+ * really been cleared by the time we return from this handler.
+ */
wmb();
return IRQ_HANDLED;
}
@@ -527,6 +575,11 @@
channel->waiting->filled = iovec.size;
channel->waiting->read_index = 0;
+ if (channel->src == TSPP_SOURCE_TSIF0)
+ device->tsif[0].stat_rx++;
+ else if (channel->src == TSPP_SOURCE_TSIF1)
+ device->tsif[1].stat_rx++;
+
/* update the pointers */
channel->waiting = channel->waiting->next;
}
@@ -2326,6 +2379,31 @@
base + debugfs_tsif_regs[i].offset,
&fops_iomem_x32);
}
+
+ debugfs_create_u32(
+ "stat_rx_chunks",
+ S_IRUGO|S_IWUGO,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_rx);
+
+ debugfs_create_u32(
+ "stat_overflow",
+ S_IRUGO|S_IWUGO,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_overflow);
+
+ debugfs_create_u32(
+ "stat_lost_sync",
+ S_IRUGO|S_IWUGO,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_lost_sync);
+
+ debugfs_create_u32(
+ "stat_timeout",
+ S_IRUGO|S_IWUGO,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_timeout);
+
}
}
@@ -2504,6 +2582,21 @@
goto err_irq;
}
+ /* map TSIF IRQs */
+ device->tsif[0].tsif_irq = TSIF1_IRQ;
+ device->tsif[1].tsif_irq = TSIF2_IRQ;
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ rc = request_irq(device->tsif[i].tsif_irq,
+ tsif_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), &device->tsif[i]);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to request TSIF%d IRQ: %d",
+ i, rc);
+ device->tsif[i].tsif_irq = 0;
+ }
+ }
+
/* BAM IRQ */
device->bam_irq = TSIF_BAM_IRQ;
@@ -2635,8 +2728,11 @@
sps_deregister_bam_device(device->bam_handle);
- for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
tsif_debugfs_exit(&device->tsif[i]);
+ if (device->tsif[i].tsif_irq)
+ free_irq(device->tsif[i].tsif_irq, &device->tsif[i]);
+ }
wake_lock_destroy(&device->wake_lock);
free_irq(device->tspp_irq, device);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 33f0600..a1bea00 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -80,6 +80,7 @@
config MMC_BLOCK_TEST
tristate "MMC block test"
depends on MMC_BLOCK && IOSCHED_TEST
+ default y
help
MMC block test can be used with test iosched to test the MMC block
device.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 8810b46..fde13c7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -298,13 +298,33 @@
{
int value;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int ret = count;
+
+ if (!card) {
+ ret = -EINVAL;
+ goto exit;
+ }
sscanf(buf, "%d", &value);
- if (value >= 0)
- md->queue.num_wr_reqs_to_start_packing = value;
+ if (value >= 0) {
+ md->queue.num_wr_reqs_to_start_packing =
+ min_t(int, value, (int)card->ext_csd.max_packed_writes);
+
+ pr_debug("%s: trigger to pack: new value = %d",
+ mmc_hostname(card->host),
+ md->queue.num_wr_reqs_to_start_packing);
+ } else {
+ pr_err("%s: value %d is not valid. old value remains = %d",
+ mmc_hostname(card->host), value,
+ md->queue.num_wr_reqs_to_start_packing);
+ ret = -EINVAL;
+ }
+
+exit:
mmc_blk_put(md);
- return count;
+ return ret;
}
static ssize_t
@@ -317,13 +337,13 @@
int ret;
if (!card)
- return -EINVAL;
-
- min_sectors_to_check_bkops_status =
- card->bkops_info.min_sectors_to_queue_delayed_work;
-
- ret = snprintf(buf, PAGE_SIZE, "%d\n",
- min_sectors_to_check_bkops_status);
+ ret = -EINVAL;
+ else {
+ min_sectors_to_check_bkops_status =
+ card->bkops_info.min_sectors_to_queue_delayed_work;
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ min_sectors_to_check_bkops_status);
+ }
mmc_blk_put(md);
return ret;
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index c5551b8..610a822 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -554,105 +554,105 @@
return NULL;
}
- switch (td->test_info.testcase) {
+	switch (td->test_info.testcase) {
case TEST_STOP_DUE_TO_FLUSH:
- return " stop due to flush";
+ return "\"stop due to flush\"";
case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
- return " stop due to flush after max-1 reqs";
+ return "\"stop due to flush after max-1 reqs\"";
case TEST_STOP_DUE_TO_READ:
- return " stop due to read";
+ return "\"stop due to read\"";
case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
- return "Test stop due to read after max-1 reqs";
+ return "\"stop due to read after max-1 reqs\"";
case TEST_STOP_DUE_TO_EMPTY_QUEUE:
- return "Test stop due to empty queue";
+ return "\"stop due to empty queue\"";
case TEST_STOP_DUE_TO_MAX_REQ_NUM:
- return "Test stop due to max req num";
+ return "\"stop due to max req num\"";
case TEST_STOP_DUE_TO_THRESHOLD:
- return "Test stop due to exceeding threshold";
+ return "\"stop due to exceeding threshold\"";
case TEST_RET_ABORT:
- return "Test err_check return abort";
+ return "\"err_check return abort\"";
case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
- return "Test err_check return partial followed by success";
+ return "\"err_check return partial followed by success\"";
case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
- return "Test err_check return partial followed by abort";
+ return "\"err_check return partial followed by abort\"";
case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
- return "Test err_check return partial multiple until success";
+ return "\"err_check return partial multiple until success\"";
case TEST_RET_PARTIAL_MAX_FAIL_IDX:
- return "Test err_check return partial max fail index";
+ return "\"err_check return partial max fail index\"";
case TEST_RET_RETRY:
- return "Test err_check return retry";
+ return "\"err_check return retry\"";
case TEST_RET_CMD_ERR:
- return "Test err_check return cmd error";
+ return "\"err_check return cmd error\"";
case TEST_RET_DATA_ERR:
- return "Test err_check return data error";
+ return "\"err_check return data error\"";
case TEST_HDR_INVALID_VERSION:
- return "Test invalid - wrong header version";
+ return "\"invalid - wrong header version\"";
case TEST_HDR_WRONG_WRITE_CODE:
- return "Test invalid - wrong write code";
+ return "\"invalid - wrong write code\"";
case TEST_HDR_INVALID_RW_CODE:
- return "Test invalid - wrong R/W code";
+ return "\"invalid - wrong R/W code\"";
case TEST_HDR_DIFFERENT_ADDRESSES:
- return "Test invalid - header different addresses";
+ return "\"invalid - header different addresses\"";
case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
- return "Test invalid - header req num smaller than actual";
+ return "\"invalid - header req num smaller than actual\"";
case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
- return "Test invalid - header req num larger than actual";
+ return "\"invalid - header req num larger than actual\"";
case TEST_HDR_CMD23_PACKED_BIT_SET:
- return "Test invalid - header cmd23 packed bit set";
+ return "\"invalid - header cmd23 packed bit set\"";
case TEST_CMD23_MAX_PACKED_WRITES:
- return "Test invalid - cmd23 max packed writes";
+ return "\"invalid - cmd23 max packed writes\"";
case TEST_CMD23_ZERO_PACKED_WRITES:
- return "Test invalid - cmd23 zero packed writes";
+ return "\"invalid - cmd23 zero packed writes\"";
case TEST_CMD23_PACKED_BIT_UNSET:
- return "Test invalid - cmd23 packed bit unset";
+ return "\"invalid - cmd23 packed bit unset\"";
case TEST_CMD23_REL_WR_BIT_SET:
- return "Test invalid - cmd23 rel wr bit set";
+ return "\"invalid - cmd23 rel wr bit set\"";
case TEST_CMD23_BITS_16TO29_SET:
- return "Test invalid - cmd23 bits [16-29] set";
+ return "\"invalid - cmd23 bits [16-29] set\"";
case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
- return "Test invalid - cmd23 header block not in count";
+ return "\"invalid - cmd23 header block not in count\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER:
- return "\nTest packing control - pack n";
+ return "\"packing control - pack n\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
- return "\nTest packing control - pack n followed by read";
+ return "\"packing control - pack n followed by read\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
- return "\nTest packing control - pack n followed by flush";
+ return "\"packing control - pack n followed by flush\"";
case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
- return "\nTest packing control - pack one followed by read";
+ return "\"packing control - pack one followed by read\"";
case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
- return "\nTest packing control - pack threshold";
+ return "\"packing control - pack threshold\"";
case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
- return "\nTest packing control - no packing";
+ return "\"packing control - no packing\"";
case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
- return "\nTest packing control - no packing, trigger requests";
+ return "\"packing control - no packing, trigger requests\"";
case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
- return "\nTest packing control - no pack, trigger-read-trigger";
+ return "\"packing control - no pack, trigger-read-trigger\"";
case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
- return "\nTest packing control- no pack, trigger-flush-trigger";
+ return "\"packing control- no pack, trigger-flush-trigger\"";
case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
- return "\nTest packing control - mix: pack -> no pack -> pack";
+ return "\"packing control - mix: pack -> no pack -> pack\"";
case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
- return "\nTest packing control - mix: no pack->pack->no pack";
+ return "\"packing control - mix: no pack->pack->no pack\"";
case TEST_WRITE_DISCARD_SANITIZE_READ:
- return "\nTest write, discard, sanitize";
+ return "\"write, discard, sanitize\"";
case BKOPS_DELAYED_WORK_LEVEL_1:
- return "\nTest delayed work BKOPS level 1";
+ return "\"delayed work BKOPS level 1\"";
case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
- return "\nTest delayed work BKOPS level 1 with HPI";
+ return "\"delayed work BKOPS level 1 with HPI\"";
case BKOPS_CANCEL_DELAYED_WORK:
- return "\nTest cancel delayed BKOPS work";
+ return "\"cancel delayed BKOPS work\"";
case BKOPS_URGENT_LEVEL_2:
- return "\nTest urgent BKOPS level 2";
+ return "\"urgent BKOPS level 2\"";
case BKOPS_URGENT_LEVEL_2_TWO_REQS:
- return "\nTest urgent BKOPS level 2, followed by a request";
+ return "\"urgent BKOPS level 2, followed by a request\"";
case BKOPS_URGENT_LEVEL_3:
- return "\nTest urgent BKOPS level 3";
+ return "\"urgent BKOPS level 3\"";
case TEST_LONG_SEQUENTIAL_READ:
- return "Test long sequential read";
+ return "\"long sequential read\"";
case TEST_LONG_SEQUENTIAL_WRITE:
- return "Test long sequential write";
+ return "\"long sequential write\"";
default:
- return "Unknown testcase";
+ return " Unknown testcase";
}
return NULL;
@@ -1702,6 +1702,12 @@
(bkops_stat->suspend == 0) &&
(bkops_stat->hpi == 1))
goto exit;
+ /* this might happen due to timing issues */
+ else if
+ ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
+ (bkops_stat->suspend == 0) &&
+ (bkops_stat->hpi == 0))
+ goto ignore;
else
goto fail;
break;
@@ -1735,6 +1741,9 @@
exit:
return 0;
+ignore:
+ test_iosched_set_ignore_round(true);
+ return 0;
fail:
if (td->fs_wr_reqs_during_test) {
test_pr_info("%s: wr reqs during test, cancel the round",
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8897f18a..8eb787d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -210,7 +210,9 @@
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
+ mq->num_wr_reqs_to_start_packing =
+ min_t(int, (int)card->ext_csd.max_packed_writes,
+ DEFAULT_NUM_REQS_TO_START_PACK);
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 81a4ba0..4e76f61 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -162,10 +162,7 @@
if (ret)
goto out;
- if (card->host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50)) {
+ if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50;
@@ -480,8 +477,7 @@
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@@ -491,23 +487,27 @@
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
@@ -515,6 +515,7 @@
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
@@ -653,11 +654,7 @@
* systems that claim 1.8v signalling in fact do not support
* it.
*/
- if ((ocr & R4_18V_PRESENT) &&
- (host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50))) {
+ if ((ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
true);
if (err) {
@@ -964,10 +961,12 @@
mmc_claim_host(host);
/* No need to reinitialize powered-resumed nonremovable cards */
- if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
+ if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
+ sdio_reset(host);
+ mmc_go_idle(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
- else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
if (err > 0) {
@@ -1054,6 +1053,10 @@
goto out;
}
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
@@ -1119,6 +1122,10 @@
/*
* Detect and init the card.
*/
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err) {
if (err == -EAGAIN) {
@@ -1236,79 +1243,6 @@
int sdio_reset_comm(struct mmc_card *card)
{
- struct mmc_host *host = card->host;
- u32 ocr;
- int err;
-
- printk("%s():\n", __func__);
- mmc_claim_host(host);
-
- mmc_go_idle(host);
-
- mmc_set_clock(host, host->f_min);
-
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (err)
- goto err;
-
- host->ocr = mmc_select_voltage(host, ocr);
- if (!host->ocr) {
- err = -EINVAL;
- goto err;
- }
-
- err = mmc_send_io_op_cond(host, host->ocr, &ocr);
- if (err)
- goto err;
-
- if (mmc_host_is_spi(host)) {
- err = mmc_spi_set_crc(host, use_spi_crc);
- if (err)
- goto err;
- }
-
- if (!mmc_host_is_spi(host)) {
- err = mmc_send_relative_addr(host, &card->rca);
- if (err)
- goto err;
- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
- }
- if (!mmc_host_is_spi(host)) {
- err = mmc_select_card(card);
- if (err)
- goto err;
- }
-
- /*
- * Switch to high-speed (if supported).
- */
- err = sdio_enable_hs(card);
- if (err > 0)
- mmc_sd_go_highspeed(card);
- else if (err)
- goto err;
-
- /*
- * Change to the card's maximum speed.
- */
- mmc_set_clock(host, mmc_sdio_get_max_clock(card));
-
- err = sdio_enable_4bit_bus(card);
- if (err > 0) {
- if (host->caps & MMC_CAP_8_BIT_DATA)
- mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
- else if (host->caps & MMC_CAP_4_BIT_DATA)
- mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
- }
- else if (err)
- goto err;
-
- mmc_release_host(host);
- return 0;
-err:
- printk("%s: Error resetting SDIO communications (%d)\n",
- mmc_hostname(host), err);
- mmc_release_host(host);
- return err;
+ return mmc_power_restore_host(card->host);
}
EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index de7e5bc..c8b47b9 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -46,6 +46,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/pm_qos.h>
+#include <linux/iopoll.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
@@ -161,6 +162,7 @@
static int msmsdcc_runtime_resume(struct device *dev);
static int msmsdcc_dt_get_array(struct device *dev, const char *prop_name,
u32 **out_array, int *len, int size);
+static int msmsdcc_execute_tuning(struct mmc_host *mmc, u32 opcode);
static inline unsigned short msmsdcc_get_nr_sg(struct msmsdcc_host *host)
{
@@ -348,23 +350,23 @@
* SDCC controller itself can support hard reset.
*/
if (is_sw_hard_reset(host)) {
- ktime_t start;
+ u32 pwr;
writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
| MCI_SW_RST, host->base + MMCIPOWER);
msmsdcc_sync_reg_wr(host);
- start = ktime_get();
- while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST) {
- /*
- * See comment in msmsdcc_soft_reset() on choosing 1ms
- * poll timeout.
- */
- if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
- pr_err("%s: %s failed\n",
- mmc_hostname(host->mmc), __func__);
- BUG();
- }
+ /*
+ * See comment in msmsdcc_soft_reset() on choosing 1ms
+ * poll timeout.
+ */
+ ret = readl_poll_timeout_noirq(host->base + MMCIPOWER,
+ pwr, !(pwr & MCI_SW_RST), 100, 10);
+
+ if (ret) {
+ pr_err("%s: %s failed (%d)\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ BUG();
}
} else {
ret = clk_reset(host->clk, CLK_RESET_ASSERT);
@@ -470,7 +472,9 @@
readl_relaxed(host->base + MMCISTATUS)
& MCI_TXACTIVE ? "TX" : "RX");
msmsdcc_dump_sdcc_state(host);
- BUG();
+ msmsdcc_reset_and_restore(host);
+ host->pending_dpsm_reset = false;
+ goto out;
}
}
}
@@ -1198,8 +1202,9 @@
*c |= MCI_CSPM_DATCMD;
/* Check if AUTO CMD19/CMD21 is required or not? */
- if (host->tuning_needed &&
- (host->en_auto_cmd19 || host->en_auto_cmd21)) {
+ if (host->tuning_needed && (cmd->mrq->data &&
+ (cmd->mrq->data->flags & MMC_DATA_READ)) &&
+ (host->en_auto_cmd19 || host->en_auto_cmd21)) {
/*
* For open ended block read operation (without CMD23),
* AUTO_CMD19/AUTO_CMD21 bit should be set while sending
@@ -1213,7 +1218,8 @@
MMC_READ_MULTIPLE_BLOCK) ||
(!host->curr.mrq->sbc &&
(cmd->opcode == MMC_READ_SINGLE_BLOCK ||
- cmd->opcode == MMC_READ_MULTIPLE_BLOCK))) {
+ cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == SD_IO_RW_EXTENDED))) {
msmsdcc_enable_cdr_cm_sdc4_dll(host);
if (host->en_auto_cmd19 &&
host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
@@ -1413,6 +1419,10 @@
else
data->error = -ETIMEDOUT;
}
+ /* In case of DATA CRC/timeout error, execute tuning again */
+ if (host->tuning_needed && !host->tuning_in_progress)
+ host->tuning_done = false;
+
} else if (status & MCI_RXOVERRUN) {
pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
data->error = -EIO;
@@ -1765,6 +1775,8 @@
msmsdcc_dump_sdcc_state(host);
/* Execute full tuning in case of CRC errors */
host->saved_tuning_phase = INVALID_TUNING_PHASE;
+ if (host->tuning_needed)
+ host->tuning_done = false;
cmd->error = -EILSEQ;
}
@@ -1861,9 +1873,10 @@
*/
wake_lock(&host->sdio_wlock);
} else {
- if (mmc->card && !mmc_card_sdio(mmc->card)) {
- WARN(1, "%s: SDCC core interrupt received for non-SDIO cards when SDCC clocks are off\n",
- mmc_hostname(mmc));
+ if (!mmc->card || (mmc->card &&
+ !mmc_card_sdio(mmc->card))) {
+ pr_warning("%s: SDCC core interrupt received for non-SDIO cards when SDCC clocks are off\n",
+ mmc_hostname(mmc));
ret = 1;
break;
}
@@ -1895,9 +1908,10 @@
#endif
if (status & MCI_SDIOINTROPE) {
- if (mmc->card && !mmc_card_sdio(mmc->card)) {
- WARN(1, "%s: SDIO interrupt received for non-SDIO card\n",
- mmc_hostname(mmc));
+ if (!mmc->card || (mmc->card &&
+ !mmc_card_sdio(mmc->card))) {
+ pr_warning("%s: SDIO interrupt (SDIOINTROPE) received for non-SDIO card\n",
+ mmc_hostname(mmc));
ret = 1;
break;
}
@@ -2144,6 +2158,22 @@
}
}
+ /*
+ * Check if DLL retuning is required? If yes, perform it here before
+ * starting new request.
+ */
+ if (host->tuning_needed && !host->tuning_in_progress &&
+ !host->tuning_done) {
+ pr_debug("%s: %s: execute_tuning for timing mode = %d\n",
+ mmc_hostname(mmc), __func__, host->mmc->ios.timing);
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+ msmsdcc_execute_tuning(mmc,
+ MMC_SEND_TUNING_BLOCK);
+ else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200)
+ msmsdcc_execute_tuning(mmc,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ }
+
spin_lock_irqsave(&host->lock, flags);
if (host->eject) {
@@ -3340,10 +3370,24 @@
/* Card clock frequency must be > 100MHz to enable tuning */
clk |= (4 << 14);
host->tuning_needed = 1;
- } else if (ios->timing == MMC_TIMING_UHS_DDR50) {
- clk |= (3 << 14);
} else {
- clk |= (2 << 14); /* feedback clock */
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ clk |= (3 << 14);
+ else
+ clk |= (2 << 14); /* feedback clock */
+
+ host->tuning_done = false;
+ if (atomic_read(&host->clks_on)) {
+ /* Write 1 to DLL_RST bit of MCI_DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) | MCI_DLL_RST),
+ host->base + MCI_DLL_CONFIG);
+
+ /* Write 1 to DLL_PDN bit of MCI_DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) | MCI_DLL_PDN),
+ host->base + MCI_DLL_CONFIG);
+ }
}
/* Select free running MCLK as input clock of cm_dll_sdc4 */
@@ -4178,6 +4222,8 @@
out:
spin_lock_irqsave(&host->lock, flags);
host->tuning_in_progress = 0;
+ if (!rc)
+ host->tuning_done = true;
spin_unlock_irqrestore(&host->lock, flags);
exit:
pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index bb1b211..500b5fb 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -401,6 +401,7 @@
bool io_pad_pwr_switch;
bool tuning_in_progress;
bool tuning_needed;
+ bool tuning_done;
bool en_auto_cmd19;
bool en_auto_cmd21;
bool sdio_gpio_lpm;
diff --git a/drivers/mtd/devices/msm_nand.c b/drivers/mtd/devices/msm_nand.c
index c43abca..5cace6b 100644
--- a/drivers/mtd/devices/msm_nand.c
+++ b/drivers/mtd/devices/msm_nand.c
@@ -6757,6 +6757,23 @@
return 0;
}
+static const unsigned int bch_sup_cntrl[] = {
+ 0x307, /* MSM7x2xA */
+ 0x4030, /* MDM 9x15 */
+};
+
+static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
+ if (hw_id == bch_sup_cntrl[i])
+ return true;
+ }
+
+ return false;
+}
+
/**
* msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
* @param mtd MTD device structure
@@ -6778,6 +6795,7 @@
uint32_t devcfg;
struct nand_flash_dev *flashdev = NULL;
struct nand_manufacturers *flashman = NULL;
+ unsigned int hw_id;
/* Probe the Flash device for ONFI compliance */
if (!flash_onfi_probe(chip)) {
@@ -6845,7 +6863,8 @@
mtd_writesize = mtd->writesize >> 1;
/* Check whether controller and NAND device support 8bit ECC*/
- if ((flash_rd_reg(chip, MSM_NAND_HW_INFO) == 0x307)
+ hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
+ if (msm_nand_has_bch_ecc_engine(hw_id)
&& (supported_flash.ecc_correctability >= 8)) {
pr_info("Found supported NAND device for %dbit ECC\n",
supported_flash.ecc_correctability);
@@ -6853,7 +6872,8 @@
} else {
pr_info("Found a supported NAND device\n");
}
- pr_info("NAND Id : 0x%x\n", supported_flash.flash_id);
+ pr_info("NAND Controller ID : 0x%x\n", hw_id);
+ pr_info("NAND Device ID : 0x%x\n", supported_flash.flash_id);
pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
pr_info("Density : %lld MByte\n", (mtd->size>>20));
pr_info("Pagesize : %d Bytes\n", mtd->writesize);
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index d709e17..9a6cc80 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -2351,7 +2351,7 @@
{
struct msm_nand_info *info;
struct resource *res;
- int err, n_parts;
+ int err;
struct device_node *pnode;
struct mtd_part_parser_data parser_data;
@@ -2443,10 +2443,10 @@
goto free_bam;
}
parser_data.of_node = pnode;
- n_parts = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
+ err = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
NULL, 0);
- if (n_parts < 0) {
- pr_err("Unable to register MTD partitions %d\n", n_parts);
+ if (err < 0) {
+ pr_err("Unable to register MTD partitions %d\n", err);
goto free_bam;
}
dev_set_drvdata(&pdev->dev, info);
@@ -2455,7 +2455,6 @@
info->nand_phys, info->bam_phys, info->bam_irq);
pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
- pr_info("Found %d MTD partitions\n", n_parts);
goto out;
free_bam:
msm_nand_bam_free(info);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 6e42fdb..71a9860 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -566,8 +566,6 @@
void __iomem *pmu_spare_reg;
u32 reg = 0;
unsigned long flags;
- struct clk *cxo = clk_get(&penv->pdev->dev, "cxo");
- int rc = 0;
if (!enable_wcnss_suspend_notify)
return;
@@ -576,18 +574,12 @@
return;
/* For Riva */
- rc = clk_prepare_enable(cxo);
- if (rc) {
- pr_err("cxo enable failed\n");
- return;
- }
pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;
spin_lock_irqsave(®_spinlock, flags);
reg = readl_relaxed(pmu_spare_reg);
reg |= RIVA_SUSPEND_BIT;
writel_relaxed(reg, pmu_spare_reg);
spin_unlock_irqrestore(®_spinlock, flags);
- clk_disable_unprepare(cxo);
}
EXPORT_SYMBOL(wcnss_suspend_notify);
@@ -596,8 +588,6 @@
void __iomem *pmu_spare_reg;
u32 reg = 0;
unsigned long flags;
- struct clk *cxo = clk_get(&penv->pdev->dev, "cxo");
- int rc = 0;
if (!enable_wcnss_suspend_notify)
return;
@@ -608,17 +598,11 @@
/* For Riva */
pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;
- rc = clk_prepare_enable(cxo);
- if (rc) {
- pr_err("cxo enable failed\n");
- return;
- }
spin_lock_irqsave(®_spinlock, flags);
reg = readl_relaxed(pmu_spare_reg);
reg &= ~RIVA_SUSPEND_BIT;
writel_relaxed(reg, pmu_spare_reg);
spin_unlock_irqrestore(®_spinlock, flags);
- clk_disable_unprepare(cxo);
}
EXPORT_SYMBOL(wcnss_resume_notify);
@@ -858,7 +842,6 @@
else
wcnss_gpios_config(penv->gpios_5wire, false);
fail_gpio_res:
- kfree(penv);
penv = NULL;
return ret;
}
@@ -899,7 +882,7 @@
}
/* create an environment to track the device */
- penv = kzalloc(sizeof(*penv), GFP_KERNEL);
+ penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL);
if (!penv) {
dev_err(&pdev->dev, "cannot allocate device memory.\n");
return -ENOMEM;
@@ -908,8 +891,10 @@
/* register sysfs entries */
ret = wcnss_create_sysfs(&pdev->dev);
- if (ret)
+ if (ret) {
+ penv = NULL;
return -ENOENT;
+ }
#ifdef MODULE
@@ -944,6 +929,7 @@
wcnss_wlan_remove(struct platform_device *pdev)
{
wcnss_remove_sysfs(&pdev->dev);
+ penv = NULL;
return 0;
}
@@ -997,7 +983,6 @@
subsystem_put(penv->pil);
- kfree(penv);
penv = NULL;
}
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 34e1d40..75cc086 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -76,4 +76,18 @@
PNP PMIC. It configures the frequency of clkdiv outputs on the
PMIC. These clocks are typically wired through alternate functions
on gpio pins.
+
+config IPA
+ tristate "IPA support"
+ depends on SPS
+ help
+ This driver supports the Internet Packet Accelerator (IPA) core.
+ IPA is a programmable protocol processor HW block.
+ It is designed to support generic HW processing of UL/DL IP packets
+ for various use cases independent of radio technology.
+ The driver support client connection and configuration
+ for the IPA core.
+ Kernel and user-space processes can call the IPA driver
+ to configure IPA core.
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 35efd91..0a755d3 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MSM_SSBI) += ssbi.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_IPA) += ipa/
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..ded5b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+ ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
new file mode 100644
index 0000000..0ae2552
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+static struct a2_service_cb_type {
+ void *tx_complete_cb;
+ void *rx_cb;
+ u32 producer_handle;
+ u32 consumer_handle;
+} a2_service_cb;
+
+static struct sps_mem_buffer data_mem_buf[2];
+static struct sps_mem_buffer desc_mem_buf[2];
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+ struct sps_pipe *pipe);
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl);
+
+/**
+ * a2_mux_initialize() - initialize A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ */
+int a2_mux_initialize(void)
+{
+ (void) msm_bam_dmux_ul_power_vote();
+
+ return 0;
+}
+
+/**
+ * a2_mux_close() - close A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_close(void)
+{
+ int ret = 0;
+
+ (void) msm_bam_dmux_ul_power_unvote();
+
+ ret = ipa_disconnect(a2_service_cb.consumer_handle);
+ if (0 != ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = ipa_disconnect(a2_service_cb.producer_handle);
+ if (0 != ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
+
+/**
+ * a2_mux_open_port() - open connection to A2
+ * @wwan_logical_channel_id: WWAN logical channel ID
+ * @rx_cb: Rx callback
+ * @tx_complete_cb: Tx completed callback
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+ void *tx_complete_cb)
+{
+ int ret = 0;
+ u8 src_pipe = 0;
+ u8 dst_pipe = 0;
+ struct sps_pipe *a2_to_ipa_pipe = NULL;
+ struct sps_pipe *ipa_to_a2_pipe = NULL;
+
+ (void) wwan_logical_channel_id;
+
+ a2_service_cb.rx_cb = rx_cb;
+ a2_service_cb.tx_complete_cb = tx_complete_cb;
+
+ ret = connect_pipe_ipa(A2_TO_IPA,
+ &src_pipe,
+ &(a2_service_cb.consumer_handle),
+ a2_to_ipa_pipe);
+ if (ret) {
+ pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
+ goto bail;
+ }
+
+ ret = connect_pipe_ipa(IPA_TO_A2,
+ &dst_pipe,
+ &(a2_service_cb.producer_handle),
+ ipa_to_a2_pipe);
+ if (ret) {
+ pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
+ sps_disconnect(a2_to_ipa_pipe);
+ sps_free_endpoint(a2_to_ipa_pipe);
+ (void) ipa_disconnect(a2_service_cb.consumer_handle);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+ struct sps_pipe *pipe)
+{
+ int ret;
+ struct sps_connect connection = {0, };
+ u32 a2_handle = 0;
+ u32 a2_phy_addr = 0;
+ struct a2_mux_pipe_connection pipe_connection = { 0, };
+ struct ipa_connect_params ipa_in_params;
+ struct ipa_sps_params sps_out_params;
+
+ memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+ memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+ if (!usb_pipe_idx || !clnt_hdl) {
+ pr_err("connect_pipe_ipa :: null arguments\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
+ if (ret) {
+ pr_err("ipa_get_a2_mux_pipe_info failed\n");
+ goto bail;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ a2_phy_addr = pipe_connection.src_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+ ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+ ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ pr_err("-*&- pipe_connection->src_pipe_index = %d\n",
+ pipe_connection.src_pipe_index);
+ ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
+ } else {
+ a2_phy_addr = pipe_connection.dst_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+ ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
+ }
+
+ ret = sps_phy2h(a2_phy_addr, &a2_handle);
+ if (ret) {
+ pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.client_bam_hdl = a2_handle;
+ ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
+ ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
+
+ if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
+ pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
+ ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
+ pipe_connection.data_fifo_base_offset,
+ pipe_connection.data_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: data fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
+ pipe_connection.desc_fifo_base_offset,
+ pipe_connection.desc_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: desc. fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.data = data_mem_buf[pipe_dir];
+ ipa_in_params.desc = desc_mem_buf[pipe_dir];
+ }
+
+ ret = a2_ipa_connect_pipe(&ipa_in_params,
+ &sps_out_params,
+ clnt_hdl);
+ if (ret) {
+ pr_err("-**- USB-IPA info: ipa_connect failed\n");
+ pr_err("%s: usb_ipa_connect_pipe failed\n", __func__);
+ goto bail;
+ }
+
+ pipe = sps_alloc_endpoint();
+ if (pipe == NULL) {
+ pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+ ret = -ENOMEM;
+ goto a2_ipa_connect_pipe_failed;
+ }
+
+ ret = sps_get_config(pipe, &connection);
+ if (ret) {
+ pr_err("%s: tx get config failed %d\n", __func__, ret);
+ goto get_config_failed;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ connection.mode = SPS_MODE_SRC;
+ *usb_pipe_idx = connection.src_pipe_index;
+ connection.source = a2_handle;
+ connection.destination = sps_out_params.ipa_bam_hdl;
+ connection.src_pipe_index = pipe_connection.src_pipe_index;
+ connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
+ } else {
+ connection.mode = SPS_MODE_DEST;
+ *usb_pipe_idx = connection.dest_pipe_index;
+ connection.source = sps_out_params.ipa_bam_hdl;
+ connection.destination = a2_handle;
+ connection.src_pipe_index = sps_out_params.ipa_ep_idx;
+ connection.dest_pipe_index = pipe_connection.dst_pipe_index;
+ }
+
+ connection.event_thresh = 16;
+ connection.data = sps_out_params.data;
+ connection.desc = sps_out_params.desc;
+
+ ret = sps_connect(pipe, &connection);
+ if (ret < 0) {
+ pr_err("%s: tx connect error %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = 0;
+ goto bail;
+error:
+ sps_disconnect(pipe);
+get_config_failed:
+ sps_free_endpoint(pipe);
+a2_ipa_connect_pipe_failed:
+ (void) ipa_disconnect(*clnt_hdl);
+bail:
+ return ret;
+}
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl)
+{
+ return ipa_connect(in_params, out_params, clnt_hdl);
+}
+
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
new file mode 100644
index 0000000..80885da
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _A2_SERVICE_H_
+#define _A2_SERVICE_H_
+
+/* Bring up the A2 MUX service; returns 0 on success, negative on error. */
+int a2_mux_initialize(void);
+
+/* Tear down the A2 MUX service; returns 0 on success, negative on error. */
+int a2_mux_close(void);
+
+/*
+ * Open one logical channel on the A2 MUX.
+ * NOTE(review): rx_cb/tx_complete_cb are untyped (void *) callbacks —
+ * presumably invoked on receive and on transmit completion; confirm the
+ * expected signatures against the implementation in a2_service.c.
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+		void *tx_complete_cb);
+
+#endif /* _A2_SERVICE_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
new file mode 100644
index 0000000..8f68ef5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -0,0 +1,1790 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_READ_MAX (16)
+/* parenthesize the macro argument so arbitrary expressions expand safely */
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+/*
+ * 92.31 MHz. Spelled as an integer constant: the old
+ * "(92.31 * 1000 * 1000UL)" folded to the same value (92310000) but
+ * floating-point constants do not belong in kernel code.
+ */
+#define IPA_V1_CLK_RATE (92310000UL)
+#define IPA_DMA_POOL_SIZE (512)
+#define IPA_DMA_POOL_ALIGNMENT (4)
+#define IPA_DMA_POOL_BOUNDARY (1024)
+#define WLAN_AMPDU_TX_EP (15)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+/* byte count of @str including the NUL, capped at the max string length */
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+/*
+ * struct ipa_plat_drv_res - platform resources parsed from the device tree
+ * @ipa_mem_base/@ipa_mem_size: IPA register region
+ * @bam_mem_base/@bam_mem_size: BAM register region
+ * @ipa_irq/@bam_irq: interrupt numbers
+ * @ipa_pipe_mem_start_ofst/@ipa_pipe_mem_size: pipe memory window
+ * @a2_to_ipa_pipe/@ipa_to_a2_pipe: A2 MUX pipe descriptors for each direction
+ */
+struct ipa_plat_drv_res {
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 bam_mem_base;
+	u32 bam_mem_size;
+	u32 ipa_irq;
+	u32 bam_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	struct a2_mux_pipe_connection a2_to_ipa_pipe;
+	struct a2_mux_pipe_connection ipa_to_a2_pipe;
+};
+
+/* single instance of the DT-parsed platform resources */
+static struct ipa_plat_drv_res ipa_res = {0, };
+static struct of_device_id ipa_plat_drv_match[] = {
+	{
+		.compatible = "qcom,ipa",
+	},
+
+	{
+	}
+};
+
+/* clock handles populated by ipa_get_clks() at probe time */
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct device *ipa_dev;
+
+/* global driver context, allocated in ipa_init() */
+struct ipa_context *ipa_ctx;
+
+/* module parameters: completion model and table placement (local SRAM vs
+ * system memory) */
+static bool polling_mode;
+module_param(polling_mode, bool, 0644);
+MODULE_PARM_DESC(polling_mode,
+		"1 - pure polling mode; 0 - interrupt+polling mode");
+static uint polling_delay_ms = 50;
+module_param(polling_delay_ms, uint, 0644);
+MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");
+static bool hdr_tbl_lcl = 1;
+module_param(hdr_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
+static bool ip4_rt_tbl_lcl = 1;
+module_param(ip4_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_rt_tbl_lcl,
+		"where ip4 rt tables reside 1-local; 0-system");
+static bool ip6_rt_tbl_lcl = 1;
+module_param(ip6_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_rt_tbl_lcl,
+		"where ip6 rt tables reside 1-local; 0-system");
+static bool ip4_flt_tbl_lcl = 1;
+module_param(ip4_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_flt_tbl_lcl,
+		"where ip4 flt tables reside 1-local; 0-system");
+static bool ip6_flt_tbl_lcl = 1;
+module_param(ip6_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_flt_tbl_lcl,
+		"where ip6 flt tables reside 1-local; 0-system");
+
+/* forward declarations for the DT-parsing helpers defined below */
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction pipe_dir,
+				    struct a2_mux_pipe_connection *pdata);
+
+static int ipa_update_connections_info(struct device_node *node,
+		struct a2_mux_pipe_connection *pipe_connection);
+
+static void ipa_set_aggregation_params(void);
+
+/*
+ * ipa_read() - debug read op; reports the IPA HW version register once.
+ * Only the first call returns data; subsequent calls return 0 (EOF).
+ * Never writes more than @count bytes to the user buffer.
+ */
+static ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	u32 reg_val = 0xfeedface;
+	char str[IPA_READ_MAX];
+	int result;
+	static int read_cnt;
+
+	if (read_cnt) {
+		IPAERR("only supports one call to read\n");
+		return 0;
+	}
+
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+	result = scnprintf(str, IPA_READ_MAX, "%x\n", reg_val);
+	/*
+	 * Clamp to the caller's buffer size: the old code copied "result"
+	 * bytes unconditionally and could overflow a short user buffer.
+	 */
+	if ((size_t)result > count)
+		result = count;
+	if (copy_to_user(buf, str, result))
+		return -EFAULT;
+	read_cnt = 1;
+
+	return result;
+}
+
+/* Resolve the driver context from the char device and stash it on the
+ * file so the other file operations can retrieve it. Always succeeds. */
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+	IPADBG("ENTER\n");
+	filp->private_data =
+		container_of(inode->i_cdev, struct ipa_context, cdev);
+
+	return 0;
+}
+
+/*
+ * ipa_ioctl() - user-space configuration entry point for the IPA driver.
+ *
+ * Dispatches NAT, header, routing and filtering commands. Variable-length
+ * commands are copied from user space in two stages: a fixed-size header
+ * first (to learn the element count), then the full payload.
+ *
+ * NOTE(review): the element count is fetched from user memory twice
+ * (classic double-fetch/TOCTOU) and is never re-validated after the second
+ * copy; "count * sizeof(elem)" is also an unchecked u32 multiplication.
+ * A racing user thread could inflate the count between the two copies.
+ * Both should be validated against the values copied into "param" — verify
+ * against later upstream hardening of this ioctl.
+ *
+ * Returns 0 on success, -ENOTTY for unknown commands, -EFAULT/-ENOMEM on
+ * copy or allocation failure.
+ */
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	/* scratch area for the fixed-size leading part of each command;
+	 * assumed large enough for every ipa_ioc_* header struct —
+	 * TODO confirm 128 covers the largest one */
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* copy back: allocate_nat_device fills in result fields */
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		/* NOTE(review): "entries" re-read from user memory below;
+		 * see function header for the double-fetch concern */
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+		   sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+		   sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* copy back so the caller sees per-entry handles/status */
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+		   sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa_reset_flt(arg);
+		break;
+	case IPA_IOC_DUMP:
+		ipa_dump();
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	default:        /* redundant, as cmd was checked against MAXNR */
+		return -ENOTTY;
+	}
+	/* param is NULL unless one of the variable-length cases allocated it;
+	 * kfree(NULL) is a no-op */
+	kfree(param);
+
+	return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Installs one default v4 and one default v6 routing rule, both pointing
+* at the A5 LAN/WAN consumer pipe via the exception header. Because these
+* are the very first tables added, both get index 0, which the "route"
+* end-point configuration relies on.
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+	int result = -EPERM;
+
+	rt_rule = kzalloc(sizeof(struct ipa_ioc_add_rt_rule) +
+			1 * sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+
+	/* setup a default v4 route to point to A5 */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		goto bail;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5; only the IP family
+	 * changes, the rest of the request is reused as-is */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		goto bail;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	result = 0;
+bail:
+	kfree(rt_rule);
+
+	return result;
+}
+
+/*
+ * ipa_setup_exception_path() - install the default exception header and
+ * route exception packets to the A5 LAN/WAN IN pipe.
+ * Returns 0 on success, -ENOMEM on allocation failure, -EPERM otherwise.
+ */
+static int ipa_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipa_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+	strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+
+	/*
+	 * only single stream for MBIM supported and no exception packets
+	 * expected so set default header to zero
+	 */
+	hdr_entry->hdr_len = 1;
+	hdr_entry->hdr[0] = 0;
+
+	/*
+	 * SW does not know anything about default exception header so
+	 * we don't set it. IPA HW will use it as a template
+	 */
+	if (ipa_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	/* the commit succeeded; check the per-entry status too */
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* exception packets goto LAN-WAN pipe from IPA to A5 */
+	route.route_def_pipe = IPA_A5_LAN_WAN_IN;
+	route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+	if (ipa_cfg_route(&route)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+/*
+ * ipa_handle_tx_poll_for_pipe() - drain completed TX iovecs for one system
+ * pipe (polling mode) and complete the matching ipa_tx_pkt_wrapper(s).
+ *
+ * A wrapper's "cnt" encodes its role in a multi-descriptor set:
+ *   1      - standalone packet, complete immediately;
+ *   0xFFFF - last member of a set, move the deferred members back and
+ *            complete the whole set;
+ *   other  - intermediate member, park it on wait_desc_list until the
+ *            end-of-set marker arrives.
+ *
+ * NOTE(review): list_first_entry() is called without checking that
+ * head_desc_list is non-empty — presumably every iovec returned by
+ * sps_get_iovec() is guaranteed a queued wrapper; confirm against the
+ * TX submission path.
+ */
+static void ipa_handle_tx_poll_for_pipe(struct ipa_sys_context *sys)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt, *t;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	int ret;
+
+	while (1) {
+		iov.addr = 0;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			pr_err("%s: sps_get_iovec failed %d\n", __func__, ret);
+			break;
+		}
+		/* a zero address means the descriptor FIFO is drained */
+		if (!iov.addr)
+			break;
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		tx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_tx_pkt_wrapper, link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		switch (tx_pkt->cnt) {
+		case 1:
+			ipa_write_done(&tx_pkt->work);
+			break;
+		case 0xFFFF:
+			/* reached end of set */
+			spin_lock_irqsave(&sys->spinlock, irq_flags);
+			list_for_each_entry_safe(tx_pkt, t,
+					&sys->wait_desc_list, link) {
+				list_del(&tx_pkt->link);
+				list_add(&tx_pkt->link, &sys->head_desc_list);
+			}
+			tx_pkt =
+			   list_first_entry(&sys->head_desc_list,
+					    struct ipa_tx_pkt_wrapper, link);
+			spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+			ipa_write_done(&tx_pkt->work);
+			break;
+		default:
+			/* keep looping till reach the end of the set */
+			spin_lock_irqsave(&sys->spinlock,
+					  irq_flags);
+			list_del(&tx_pkt->link);
+			list_add_tail(&tx_pkt->link,
+				      &sys->wait_desc_list);
+			spin_unlock_irqrestore(&sys->spinlock,
+					       irq_flags);
+			break;
+		}
+	}
+}
+
+/*
+ * ipa_poll_function() - periodic service routine for polling mode.
+ * Drains RX on the LAN/WAN IN pipe, drains TX completions on the valid
+ * TX pipes, then re-arms itself after polling_delay_ms.
+ */
+static void ipa_poll_function(struct work_struct *work)
+{
+	static const int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
+		IPA_A5_WLAN_AMPDU_OUT };
+	int i;
+	int num_tx_pipes;
+
+	/* check all the system pipes for tx completions and rx available */
+	if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
+		ipa_handle_rx_core();
+
+	num_tx_pipes = ARRAY_SIZE(tx_pipes);
+
+	/* the WLAN AMPDU pipe (last in the list) exists only in mobile-AP
+	 * modes */
+	if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
+		num_tx_pipes--;
+
+	for (i = 0; i < num_tx_pipes; i++)
+		if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
+			ipa_handle_tx_poll_for_pipe(&ipa_ctx->sys[tx_pipes[i]]);
+
+	/*
+	 * Re-arm the poll. Do NOT call INIT_DELAYED_WORK here as the old
+	 * code did: the work item was initialized once at setup time, and
+	 * re-initializing a live delayed_work can corrupt its timer state.
+	 */
+	schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
+			msecs_to_jiffies(polling_delay_ms));
+}
+
+/*
+ * ipa_setup_a5_pipes() - bring up the three A5<->IPA system pipes
+ * (command, LAN/WAN IN, LAN/WAN OUT) and, in polling mode, start the
+ * poll work. On any failure, previously created pipes are torn down.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int ipa_setup_a5_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	/* CMD OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail;
+	}
+
+	if (ipa_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+
+	/* LAN-WAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = 8;  /* size of A5 exception hdr */
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	/* LAN-WAN OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+	if (ipa_ctx->polling_mode) {
+		INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+		result =
+		   schedule_delayed_work_on(smp_processor_id(),
+					&ipa_ctx->poll_work,
+					msecs_to_jiffies(polling_delay_ms));
+		if (!result) {
+			IPAERR(":schedule delayed work failed.\n");
+			/*
+			 * the old code jumped here with result == 0 and
+			 * reported success after tearing everything down;
+			 * return a real error instead
+			 */
+			result = -EPERM;
+			goto fail_schedule_delayed_work;
+		}
+	}
+
+	return 0;
+
+fail_schedule_delayed_work:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+fail_data_out:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_cmd:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail:
+	return result;
+}
+
+/*
+ * ipa_teardown_a5_pipes() - undo ipa_setup_a5_pipes(): stop the poll work
+ * and tear down the three A5 system pipes in reverse creation order.
+ * NOTE(review): cancel_delayed_work() does not wait for a running poll to
+ * finish (not the _sync variant) — confirm the poll cannot still be
+ * executing while the pipes are torn down.
+ */
+static void ipa_teardown_a5_pipes(void)
+{
+	cancel_delayed_work(&ipa_ctx->poll_work);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+/*
+ * ipa_load_pipe_connection() - walk the IPA device-tree child nodes and
+ * load the parameters of the A2<->IPA pipe matching @pipe_dir into @pdata.
+ * Returns 0 on success, negative errno on bad arguments or DT errors.
+ */
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction pipe_dir,
+				    struct a2_mux_pipe_connection *pdata)
+{
+	struct device_node *node;
+	int rc = -EINVAL;
+
+	/* validate before touching pdev — the old code dereferenced
+	 * pdev->dev.of_node in an initializer ahead of this check */
+	if (!pdata || !pdev)
+		goto err;
+
+	/* retrieve device tree parameters */
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		const char *str;
+
+		rc = of_property_read_string(node, "label", &str);
+		if (rc) {
+			IPAERR("Cannot read string\n");
+			goto err;
+		}
+
+		/* Check if connection type is supported */
+		if (strncmp(str, "a2-to-ipa", 10)
+			&& strncmp(str, "ipa-to-a2", 10)) {
+			/* old code fell through with rc == 0 (success) */
+			rc = -EINVAL;
+			goto err;
+		}
+
+		if (strnstr(str, "a2-to-ipa", strnlen("a2-to-ipa", 10))
+			&& IPA_TO_A2 == pipe_dir)
+			continue; /* skip to the next pipe */
+		else if (strnstr(str, "ipa-to-a2", strnlen("ipa-to-a2", 10))
+			&& A2_TO_IPA == pipe_dir)
+			continue; /* skip to the next pipe */
+
+		rc = ipa_update_connections_info(node, pdata);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	IPAERR("%s: failed\n", __func__);
+
+	return rc;
+}
+
+/*
+ * ipa_update_connections_info() - fill one A2<->IPA pipe descriptor from
+ * the properties of a single device-tree child node.
+ * Returns 0 on success, -EINVAL for NULL arguments, or the
+ * of_property_read_u32() error when a required property is missing.
+ */
+static int ipa_update_connections_info(struct device_node *node,
+		struct a2_mux_pipe_connection *pipe_connection)
+{
+	int rc;
+	const char *key;
+	uint32_t val;
+
+	/*
+	 * Bail out before the err label is reachable: the old code jumped
+	 * to err with "key" uninitialized and dereferenced a possibly-NULL
+	 * node in the error print.
+	 */
+	if (!pipe_connection || !node)
+		return -EINVAL;
+
+	key = "qcom,src-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_phy_addr = val;
+
+	/* read into a u32 and cast: passing an enum pointer straight to
+	 * of_property_read_u32() (as before) is not type-safe */
+	key = "qcom,ipa-bam-mem-type";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->mem_type = (enum ipa_pipe_mem_type)val;
+
+	key = "qcom,src-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_pipe_index = val;
+
+	key = "qcom,dst-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_phy_addr = val;
+
+	key = "qcom,dst-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_pipe_index = val;
+
+	key = "qcom,data-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_base_offset = val;
+
+	key = "qcom,data-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_size = val;
+
+	key = "qcom,descriptor-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_base_offset = val;
+
+	key = "qcom,descriptor-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+
+	pipe_connection->desc_fifo_size = val;
+
+	return 0;
+err:
+	IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+
+	return rc;
+}
+
+/**
+* ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
+*
+* @pipe_dir: pipe direction
+* @pipe_connect: connect structure containing the parameters fetched from DTS
+*
+* Copies the cached descriptor for the requested direction into
+* @pipe_connect.
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+			     struct a2_mux_pipe_connection *pipe_connect)
+{
+	if (!pipe_connect) {
+		IPAERR("ipa_get_a2_mux_pipe_info switch null args\n");
+		return -EFAULT;
+	}
+
+	if (pipe_dir == A2_TO_IPA) {
+		*pipe_connect = ipa_res.a2_to_ipa_pipe;
+	} else if (pipe_dir == IPA_TO_A2) {
+		*pipe_connect = ipa_res.ipa_to_a2_pipe;
+	} else {
+		IPAERR("ipa_get_a2_mux_pipe_info switch in default\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_set_aggregation_params() - program the current aggregation settings
+ * onto the rmnet bridge endpoints: aggregation on the producer,
+ * deaggregation on the consumer.
+ */
+static void ipa_set_aggregation_params(void)
+{
+	/* zero-init: ipa_cfg_ep_aggr() would otherwise see stack garbage
+	 * in any ipa_ep_cfg_aggr fields not explicitly set below */
+	struct ipa_ep_cfg_aggr agg_params = { 0 };
+	u32 producer_hdl = 0;
+	u32 consumer_hdl = 0;
+
+	rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+
+	agg_params.aggr = ipa_ctx->aggregation_type;
+	agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
+	agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
+
+	/* configure aggregation on producer */
+	agg_params.aggr_en = IPA_ENABLE_AGGR;
+	ipa_cfg_ep_aggr(producer_hdl, &agg_params);
+
+	/* configure deaggregation on consumer */
+	agg_params.aggr_en = IPA_ENABLE_DEAGGR;
+	ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
+}
+
+/*
+ * The following device attributes are for configuring the aggregation
+ * attributes when the driver is already running.
+ * The attributes are for configuring the aggregation type
+ * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
+ * time limit.
+ */
+/* sysfs show: print the current aggregation type as a string. */
+static ssize_t ipa_show_aggregation_type(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	const char *type_str;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	switch (ipa_ctx->aggregation_type) {
+	case IPA_MBIM_16:
+		type_str = "MBIM_16";
+		break;
+	case IPA_MBIM_32:
+		type_str = "MBIM_32";
+		break;
+	case IPA_TLP:
+		type_str = "TLP";
+		break;
+	default:
+		type_str = "NONE";
+		break;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", type_str);
+}
+
+/*
+ * sysfs store: parse the aggregation type string (MBIM_16/MBIM_32/TLP),
+ * update the context, and reprogram the endpoints.
+ * Returns @count on success, -EINVAL on unrecognized input.
+ */
+static ssize_t ipa_store_aggregation_type(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	/* the compare length includes the NUL, so these behave as exact
+	 * (strcmp-style) matches */
+	if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
+		ipa_ctx->aggregation_type = IPA_MBIM_16;
+	else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
+		ipa_ctx->aggregation_type = IPA_MBIM_32;
+	else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
+		ipa_ctx->aggregation_type = IPA_TLP;
+	else {
+		IPAERR("ipa_store_aggregation_type wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+/* sysfs attribute: aggregation_type (owner read/write) */
+static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_type,
+		ipa_store_aggregation_type);
+
+/* sysfs show: print the current aggregation byte limit. */
+static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 ipa_ctx->aggregation_byte_limit);
+}
+
+/*
+ * sysfs store: parse a decimal aggregation byte limit, update the context,
+ * and reprogram the endpoints.
+ * Returns @count on success, -EINVAL on bad input.
+ */
+static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH];
+	char *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	/*
+	 * base 10: kstrtouint()'s second argument is the numeric base, not
+	 * a length limit. The old code passed IPA_AGGR_MAX_STR_LENGTH and
+	 * only worked because that macro happens to equal 10.
+	 */
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_byte_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+/* sysfs attribute: aggregation_byte_limit (owner read/write) */
+static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_byte_limit,
+		ipa_store_aggregation_byte_limit);
+
+/* sysfs show: print the current aggregation time limit. */
+static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 ipa_ctx->aggregation_time_limit);
+}
+
+/*
+ * sysfs store: parse a decimal aggregation time limit, update the context,
+ * and reprogram the endpoints.
+ * Returns @count on success, -EINVAL on bad input.
+ */
+static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	/*
+	 * base 10: kstrtouint()'s second argument is the numeric base, not
+	 * a length limit. The old code passed IPA_AGGR_MAX_STR_LENGTH and
+	 * only worked because that macro happens to equal 10.
+	 */
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_time_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_time_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+/* sysfs attribute: aggregation_time_limit (owner read/write) */
+static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_time_limit,
+		ipa_store_aggregation_time_limit);
+
+/* char-device file operations for the IPA configuration device */
+static const struct file_operations ipa_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_open,
+	.read = ipa_read,
+	.unlocked_ioctl = ipa_ioctl,
+};
+
+/*
+ * ipa_get_clks() - look up the four IPA clocks and cache them in the
+ * file-scope handles. On failure, clocks acquired so far are released
+ * (clk_put) and all handles are left NULL so later WARN_ON checks in
+ * ipa_enable_clks()/ipa_disable_clks() fire cleanly.
+ * Returns 0 on success, -ENODEV if any clock is missing.
+ */
+static int ipa_get_clks(struct device *dev)
+{
+	ipa_cnoc_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(ipa_cnoc_clk)) {
+		IPAERR("fail to get cnoc clk\n");
+		goto fail_cnoc;
+	}
+
+	ipa_clk_src = clk_get(dev, "core_src_clk");
+	if (IS_ERR(ipa_clk_src)) {
+		IPAERR("fail to get ipa clk src\n");
+		goto fail_clk_src;
+	}
+
+	ipa_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa_clk)) {
+		IPAERR("fail to get ipa clk\n");
+		goto fail_clk;
+	}
+
+	sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+	if (IS_ERR(sys_noc_ipa_axi_clk)) {
+		IPAERR("fail to get sys_noc_ipa_axi clk\n");
+		goto fail_bus;
+	}
+
+	return 0;
+
+	/* unwind in reverse order; the old code leaked the earlier clocks */
+fail_bus:
+	sys_noc_ipa_axi_clk = NULL;
+	clk_put(ipa_clk);
+fail_clk:
+	ipa_clk = NULL;
+	clk_put(ipa_clk_src);
+fail_clk_src:
+	ipa_clk_src = NULL;
+	clk_put(ipa_cnoc_clk);
+fail_cnoc:
+	ipa_cnoc_clk = NULL;
+	return -ENODEV;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Prepares and enables the CNOC, core and bus clocks, and programs the
+* core source clock rate. A NULL handle (ipa_get_clks() not run or
+* failed) triggers a WARN_ON instead of a crash.
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
+	if (ipa_cnoc_clk) {
+		clk_prepare(ipa_cnoc_clk);
+		clk_enable(ipa_cnoc_clk);
+		/* NOTE(review): rate is set after enable here (and return
+		 * values of prepare/enable/set_rate are ignored) — confirm
+		 * this ordering is intentional for the CNOC clock */
+		clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+	} else {
+		WARN_ON(1);
+	}
+
+	/* the source clock only needs its rate set; the core clock that
+	 * consumes it is prepared/enabled below */
+	if (ipa_clk_src)
+		clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_prepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_prepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_enable(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_enable(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Disables and unprepares the bus, core and CNOC clocks (reverse of the
+* enable order). The source clock was never enabled, only rate-set, so it
+* is not touched here.
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+	if (sys_noc_ipa_axi_clk)
+		clk_disable_unprepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_disable_unprepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_cnoc_clk)
+		clk_disable_unprepare(ipa_cnoc_clk);
+	else
+		WARN_ON(1);
+}
+
+/*
+ * ipa_setup_bam_cfg() - write the BAM_CNFG_BITS configuration word into
+ * the IPA BAM register space via a temporary ioremap of that region.
+ * Returns 0 on success, -ENOMEM if the remap fails.
+ */
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+	void *bam_cnfg_bits;
+
+	bam_cnfg_bits = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+			IPA_BAM_REMAP_SIZE);
+	if (!bam_cnfg_bits)
+		return -ENOMEM;
+	ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
+			IPA_BAM_CNFG_BITS_VAL);
+	iounmap(bam_cnfg_bits);
+
+	return 0;
+}
+/**
+* ipa_init() - Initialize the IPA Driver
+*@resource_p: contains platform-specific values from the DTS file
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initializing the ipa_ctx with:
+* 1)parsed values from the dts file
+* 2)parameters passed to the module initialization
+* 3)read HW values(such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart IPA core(HW reset)
+* - Register IPA BAM to SPS driver and get a BAM handler
+* - Set configuration for IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches(kmem_cache/slab) for filter,
+* routing and IPA-tree
+* - Create memory pool with 4 objects for DMA operations(each object
+* is 512Bytes long), this object will be use for tx(A5->IPA)
+* - Initialize lists head(routing,filter,hdr,system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+* - Initialize spinlocks (for list related to A5<->IPA pipes)
+* - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
+* - Initialize Red-Black-Tree(s) for handles of header,routing rule,
+* routing table ,filtering rule
+* - Setup all A5<->IPA pipes by calling to ipa_setup_a5_pipes
+* - Preparing the descriptors for System pipes
+* - Initialize the filter block by committing IPV4 and IPV6 default rules
+* - Create empty routing table in system memory(no committing)
+* - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
+* - Create a char-device for IPA
+*/
+static int ipa_init(const struct ipa_plat_drv_res *resource_p)
+{
+ int result = 0;
+ int i;
+ struct sps_bam_props bam_props = { 0 };
+ struct ipa_flt_tbl *flt_tbl;
+ struct ipa_rt_tbl_set *rset;
+
+ IPADBG("IPA init\n");
+
+ ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+ if (!ipa_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+
+ IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
+ ipa_ctx->polling_mode = polling_mode;
+ IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+ hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
+ ip6_flt_tbl_lcl);
+ ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
+ ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
+ ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
+ ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
+ ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
+
+ ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+
+ /* setup IPA register access */
+ ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
+ resource_p->ipa_mem_size);
+ if (!ipa_ctx->mmio) {
+ IPAERR(":ipa-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_remap;
+ }
+ /* do POR programming to setup HW */
+ result = ipa_init_hw();
+ if (result) {
+ IPAERR(":error initializing driver.\n");
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+ /* read how much SRAM is available for SW use */
+ ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST);
+
+ if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
+ IPAERR("SW expect more core memory, needed %d, avail %d\n",
+ IPA_RAM_END_OFST, ipa_ctx->smem_sz);
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+ /* register IPA with SPS driver */
+ bam_props.phys_addr = resource_p->bam_mem_base;
+ bam_props.virt_addr = ioremap(resource_p->bam_mem_base,
+ resource_p->bam_mem_size);
+ if (!bam_props.virt_addr) {
+ IPAERR(":bam-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_bam_remap;
+ }
+ bam_props.virt_size = resource_p->bam_mem_size;
+ bam_props.irq = resource_p->bam_irq;
+ bam_props.num_pipes = IPA_NUM_PIPES;
+ bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+ bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+
+ result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+ if (result) {
+ IPAERR(":bam register err.\n");
+ result = -ENODEV;
+ goto fail_bam_register;
+ }
+
+ if (ipa_setup_bam_cfg(resource_p)) {
+ IPAERR(":bam cfg err.\n");
+ result = -ENODEV;
+ goto fail_flt_rule_cache;
+ }
+
+ /* set up the default op mode */
+ ipa_ctx->mode = IPA_MODE_USB_DONGLE;
+
+ /* init the lookaside cache */
+ ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
+ sizeof(struct ipa_flt_entry), 0, 0, NULL);
+ if (!ipa_ctx->flt_rule_cache) {
+ IPAERR(":ipa flt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_flt_rule_cache;
+ }
+ ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
+ sizeof(struct ipa_rt_entry), 0, 0, NULL);
+ if (!ipa_ctx->rt_rule_cache) {
+ IPAERR(":ipa rt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_rule_cache;
+ }
+ ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
+ sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_cache) {
+ IPAERR(":ipa hdr cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_cache;
+ }
+ ipa_ctx->hdr_offset_cache =
+ kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
+ 0, 0, NULL);
+ if (!ipa_ctx->hdr_offset_cache) {
+ IPAERR(":ipa hdr off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_offset_cache;
+ }
+ ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
+ sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+ if (!ipa_ctx->rt_tbl_cache) {
+ IPAERR(":ipa rt tbl cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_tbl_cache;
+ }
+ ipa_ctx->tx_pkt_wrapper_cache =
+ kmem_cache_create("IPA TX PKT WRAPPER",
+ sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->tx_pkt_wrapper_cache) {
+ IPAERR(":ipa tx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tx_pkt_wrapper_cache;
+ }
+ ipa_ctx->rx_pkt_wrapper_cache =
+ kmem_cache_create("IPA RX PKT WRAPPER",
+ sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->rx_pkt_wrapper_cache) {
+ IPAERR(":ipa rx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rx_pkt_wrapper_cache;
+ }
+ ipa_ctx->tree_node_cache =
+ kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
+ NULL);
+ if (!ipa_ctx->tree_node_cache) {
+ IPAERR(":ipa tree node cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tree_node_cache;
+ }
+
+ /*
+ * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
+ * size 512 bytes
+ */
+ ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k", NULL,
+ IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
+ IPA_DMA_POOL_BOUNDARY);
+ if (!ipa_ctx->one_kb_no_straddle_pool) {
+ IPAERR("cannot setup 1kb alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
+ }
+
+ ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+ ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+ /* init the various list heads */
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+ }
+
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+ mutex_init(&ipa_ctx->lock);
+ mutex_init(&ipa_ctx->nat_mem.lock);
+
+ for (i = 0; i < IPA_A5_SYS_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
+ spin_lock_init(&ipa_ctx->sys[i].spinlock);
+ if (i != IPA_A5_WLAN_AMPDU_OUT)
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
+ else
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].wait_desc_list);
+ }
+
+ ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
+ if (!ipa_ctx->rx_wq) {
+ IPAERR(":fail to create rx wq\n");
+ result = -ENOMEM;
+ goto fail_rx_wq;
+ }
+
+ ipa_ctx->tx_wq = create_singlethread_workqueue("ipa tx wq");
+ if (!ipa_ctx->tx_wq) {
+ IPAERR(":fail to create tx wq\n");
+ result = -ENOMEM;
+ goto fail_tx_wq;
+ }
+
+ ipa_ctx->hdr_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
+ ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
+
+ atomic_set(&ipa_ctx->ipa_active_clients, 0);
+
+ result = ipa_bridge_init();
+ if (result) {
+ IPAERR("ipa bridge init err.\n");
+ result = -ENODEV;
+ goto fail_bridge_init;
+ }
+
+ /* setup the A5-IPA pipes */
+ if (ipa_setup_a5_pipes()) {
+ IPAERR(":failed to setup IPA-A5 pipes.\n");
+ result = -ENODEV;
+ goto fail_a5_pipes;
+ }
+
+ ipa_replenish_rx_cache();
+
+ /* init the filtering block */
+ ipa_commit_flt(IPA_IP_v4);
+ ipa_commit_flt(IPA_IP_v6);
+
+ /*
+ * setup an empty routing table in system memory, this will be used
+ * to delete a routing table cleanly and safely
+ */
+ ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+ ipa_ctx->empty_rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+ &ipa_ctx->empty_rt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!ipa_ctx->empty_rt_tbl_mem.base) {
+ IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+ ipa_ctx->empty_rt_tbl_mem.size);
+ result = -ENOMEM;
+ goto fail_empty_rt_tbl;
+ }
+ memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+ ipa_ctx->empty_rt_tbl_mem.size);
+
+ /* setup the IPA pipe mem pool */
+ ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+ resource_p->ipa_pipe_mem_size);
+
+ ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+ result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+ ipa_ctx, DRV_NAME);
+ if (IS_ERR(ipa_ctx->dev)) {
+ IPAERR(":device_create err.\n");
+ result = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+ ipa_ctx->cdev.owner = THIS_MODULE;
+ ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ /* default aggregation parameters */
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ ipa_ctx->aggregation_byte_limit = 1;
+ ipa_ctx->aggregation_time_limit = 0;
+ IPADBG(":IPA driver init OK.\n");
+
+ /* gate IPA clocks */
+ ipa_disable_clks();
+
+ return 0;
+
+fail_cdev_add:
+ device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ if (ipa_ctx->pipe_mem_pool)
+ gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+ dma_free_coherent(NULL,
+ ipa_ctx->empty_rt_tbl_mem.size,
+ ipa_ctx->empty_rt_tbl_mem.base,
+ ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_empty_rt_tbl:
+ ipa_cleanup_rx();
+ ipa_teardown_a5_pipes();
+fail_a5_pipes:
+ ipa_bridge_cleanup();
+fail_bridge_init:
+ destroy_workqueue(ipa_ctx->tx_wq);
+fail_tx_wq:
+ destroy_workqueue(ipa_ctx->rx_wq);
+fail_rx_wq:
+ dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+fail_dma_pool:
+ kmem_cache_destroy(ipa_ctx->tree_node_cache);
+fail_tree_node_cache:
+ kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+ kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+ kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+ sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_bam_register:
+ iounmap(bam_props.virt_addr);
+fail_bam_remap:
+fail_init_hw:
+ iounmap(ipa_ctx->mmio);
+fail_remap:
+ kfree(ipa_ctx);
+ ipa_ctx = NULL;
+fail_mem:
+ /* gate IPA clocks */
+ ipa_disable_clks();
+ return result;
+}
+
+/*
+ * Platform-driver probe: gather memory/IRQ resources and A2 pipe info from
+ * the device tree, enable the IPA clocks and run the core initialization.
+ *
+ * Fix: a failure of ipa_init() was only logged and probe continued,
+ * potentially returning 0 with a dead driver; it is now propagated.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int ipa_plat_drv_probe(struct platform_device *pdev_p)
+{
+	int result = 0;
+	struct resource *resource_p;
+	IPADBG("IPA plat drv probe\n");
+
+	/* initialize ipa_res with compile-time defaults */
+	ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+
+	/* A2 pipe info is best-effort; failures are logged only */
+	result = ipa_load_pipe_connection(pdev_p,
+					A2_TO_IPA,
+					&ipa_res.a2_to_ipa_pipe);
+	if (result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
+					  &ipa_res.ipa_to_a2_pipe);
+	if (result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	/* Get IPA wrapper address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-base");
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_mem_base = resource_p->start;
+		ipa_res.ipa_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA BAM address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"bam-base");
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_mem_base = resource_p->start;
+		ipa_res.bam_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA pipe mem start ofst (optional; defaults above apply) */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+	if (!resource_p) {
+		IPADBG(":get resource failed for ipa-pipe-mem\n");
+	} else {
+		ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
+		ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"ipa-irq");
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_irq = resource_p->start;
+	}
+
+	/* Get IPA BAM IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"bam-irq");
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_irq = resource_p->start;
+	}
+
+	IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
+	       ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
+	IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
+	       ipa_res.bam_mem_base, ipa_res.bam_mem_size);
+	IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
+	       ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
+
+	IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
+	IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
+
+	/* stash the IPA dev ptr */
+	ipa_dev = &pdev_p->dev;
+
+	/* get IPA clocks */
+	if (ipa_get_clks(ipa_dev) != 0)
+		return -ENODEV;
+
+	/* enable IPA clocks */
+	ipa_enable_clks();
+
+	/* Proceed to real initialization */
+	result = ipa_init(&ipa_res);
+	if (result) {
+		/* ipa_init() gates the clocks on its own failure paths */
+		IPAERR("ipa_init failed\n");
+		return result;
+	}
+
+	/* sysfs knobs for aggregation parameters; failures are logged and
+	 * reported via the return value, matching prior behavior */
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_type);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_byte_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_time_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	return result;
+}
+
+/* platform driver glue; devices are matched via ipa_plat_drv_match */
+static struct platform_driver ipa_plat_drv = {
+	.probe = ipa_plat_drv_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipa_plat_drv_match,
+	},
+};
+
+/* register the IPA platform driver; probe runs on a DT match */
+static int ipa_plat_drv_init(void)
+{
+	return platform_driver_register(&ipa_plat_drv);
+}
+
+/**
+ * ipa_get_ctx() - return the global IPA driver context
+ *
+ * Returns NULL until ipa_init() has allocated the context (and again if
+ * ipa_init() failed, since it resets ipa_ctx to NULL on its error path).
+ */
+struct ipa_context *ipa_get_ctx(void)
+{
+	return ipa_ctx;
+}
+
+/* module entry point: bring up debugfs and register the platform driver */
+static int __init ipa_module_init(void)
+{
+	int result = 0;
+
+	IPADBG("IPA module init\n");
+	/* debugfs setup is best-effort; its return value is ignored */
+	ipa_debugfs_init();
+	/* Register as a platform device driver */
+	result = ipa_plat_drv_init();
+
+	return result;
+}
+
+/* late_initcall: defer IPA bring-up until earlier-stage drivers have probed */
+late_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
new file mode 100644
index 0000000..cf51ab6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include "ipa_i.h"
+
+/*
+ * Indices into the bridge[] pipe-context array. Ordering matters: the code
+ * indexes the RX side as bridge[2 * dir] and the TX side as
+ * bridge[2 * dir + 1] for a given ipa_bridge_dir.
+ */
+enum ipa_bridge_id {
+	IPA_DL_FROM_A2,		/* downlink RX: A2 -> system memory */
+	IPA_DL_TO_IPA,		/* downlink TX: system memory -> IPA */
+	IPA_UL_FROM_IPA,	/* uplink RX: IPA -> system memory */
+	IPA_UL_TO_A2,		/* uplink TX: system memory -> A2 */
+	IPA_BRIDGE_ID_MAX
+};
+
+/* usleep_range() bounds (microseconds) used while retrying in poll mode */
+static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
+static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
+/* consecutive idle poll iterations before reverting to interrupt mode */
+static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+
+/* one DMA-mapped buffer tracked on an SPS pipe descriptor list */
+struct ipa_pkt_info {
+	void *buffer;		/* kmalloc'd (GFP_DMA) payload buffer */
+	dma_addr_t dma_address;	/* DMA mapping of @buffer */
+	uint32_t len;		/* bytes of last transfer; ~0 when idle */
+	struct list_head list_node;
+};
+
+/* per-pipe state for one hop of the tethered bridge */
+struct ipa_bridge_pipe_context {
+	struct list_head head_desc_list;	/* buffers queued on the pipe */
+	struct sps_pipe *pipe;			/* SPS endpoint handle */
+	struct sps_connect connection;
+	struct sps_mem_buffer desc_mem_buf;	/* descriptor FIFO memory */
+	struct sps_register_event register_event;
+	spinlock_t spinlock;
+	u32 len;				/* count on head_desc_list */
+	u32 free_len;				/* count on free_desc_list */
+	struct list_head free_desc_list;	/* recycled, idle buffers */
+};
+
+/* indexed by enum ipa_bridge_id */
+static struct ipa_bridge_pipe_context bridge[IPA_BRIDGE_ID_MAX];
+
+/* single-threaded workqueues running the UL/DL polling workers */
+static struct workqueue_struct *ipa_ul_workqueue;
+static struct workqueue_struct *ipa_dl_workqueue;
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir);
+
+/* number of extra TX descriptors allocated on demand, per direction */
+static u32 alloc_cnt[IPA_DIR_MAX];
+
+/* uplink worker: run the bridge polling loop for the UL pipe pair */
+static void ul_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_UL);
+}
+
+/* downlink worker: run the bridge polling loop for the DL pipe pair */
+static void dl_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_DL);
+}
+
+static DECLARE_WORK(ul_work, ul_work_func);
+static DECLARE_WORK(dl_work, dl_work_func);
+
+/*
+ * Leave polling mode on the RX pipe of @dir: re-register the EOT event and
+ * enable SPS_O_EOT on the connection so completions raise interrupts again.
+ * Returns 0 on success or the SPS error code.
+ */
+static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *rx = &bridge[2 * dir];
+	int rc;
+
+	rc = sps_get_config(rx->pipe, &rx->connection);
+	if (rc) {
+		IPAERR("sps_get_config() failed %d\n", rc);
+		return rc;
+	}
+
+	rx->register_event.options = SPS_O_EOT;
+	rc = sps_register_event(rx->pipe, &rx->register_event);
+	if (rc) {
+		IPAERR("sps_register_event() failed %d\n", rc);
+		return rc;
+	}
+
+	rx->connection.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	rc = sps_set_config(rx->pipe, &rx->connection);
+	if (rc) {
+		IPAERR("sps_set_config() failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Enter polling mode on the RX pipe of @dir: set SPS_O_POLL on the
+ * connection so completions no longer raise interrupts.
+ * Returns 0 on success or the SPS error code.
+ */
+static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *rx = &bridge[2 * dir];
+	int rc;
+
+	rc = sps_get_config(rx->pipe, &rx->connection);
+	if (rc) {
+		IPAERR("sps_get_config() failed %d\n", rc);
+		return rc;
+	}
+
+	rx->connection.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	rc = sps_set_config(rx->pipe, &rx->connection);
+	if (rc) {
+		IPAERR("sps_set_config() failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate one DMA-able buffer and queue it on the RX pipe of @dir.
+ * The descriptor is linked on head_desc_list *before* the SPS submit so the
+ * completion path can rely on list order matching hardware order; on submit
+ * failure it is unlinked again.
+ *
+ * Returns 0 on success, -ENOMEM on any failure.
+ */
+static int queue_rx_single(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_pkt_info *info;
+	int ret;
+
+	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
+	if (!info) {
+		IPAERR("unable to alloc rx_pkt_info\n");
+		goto fail_pkt;
+	}
+
+	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!info->buffer) {
+		IPAERR("unable to alloc rx_pkt_buffer\n");
+		goto fail_buffer;
+	}
+
+	info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
+					   DMA_BIDIRECTIONAL);
+	/*
+	 * NOTE(review): 0/~0 is a heuristic check for a failed mapping;
+	 * dma_mapping_error() would be the robust test -- confirm.
+	 */
+	if (info->dma_address == 0 || info->dma_address == ~0) {
+		IPAERR("dma_map_single failure %p for %p\n",
+		       (void *)info->dma_address, info->buffer);
+		goto fail_dma;
+	}
+
+	/* ~0 marks the buffer as queued/idle rather than completed */
+	info->len = ~0;
+
+	list_add_tail(&info->list_node, &sys_rx->head_desc_list);
+	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
+			       IPA_RX_SKB_SIZE, info,
+			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+	if (ret) {
+		list_del(&info->list_node);
+		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_BIDIRECTIONAL);
+		IPAERR("sps_transfer_one failed %d\n", ret);
+		goto fail_dma;
+	}
+	sys_rx->len++;
+	return 0;
+
+fail_dma:
+	kfree(info->buffer);
+fail_buffer:
+	kfree(info);
+fail_pkt:
+	IPAERR("failed\n");
+	return -ENOMEM;
+}
+
+/*
+ * Poll-mode worker for one bridge direction. Each iteration:
+ *  1. reaps one completed TX descriptor (moves it to the free list),
+ *  2. reaps one completed RX descriptor, re-arms the RX pipe with a free
+ *     (or freshly allocated) buffer, and forwards the received buffer to
+ *     the TX pipe.
+ * After polling_inactivity[dir] consecutive idle iterations the RX pipe is
+ * switched back to interrupt mode and the loop exits.
+ */
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
+	struct ipa_pkt_info *tx_pkt;
+	struct ipa_pkt_info *rx_pkt;
+	struct ipa_pkt_info *tmp_pkt;
+	struct sps_iovec iov;
+	int ret;
+	int inactive_cycles = 0;
+
+	while (1) {
+		++inactive_cycles;
+
+		/* reap one completed descriptor from the TX pipe, if any */
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_tx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			/* completions arrive in queue order, so the oldest
+			 * queued descriptor is the one that finished */
+			tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_move_tail(&tx_pkt->list_node,
+				       &sys_tx->free_desc_list);
+			sys_tx->len--;
+			sys_tx->free_len++;
+			tx_pkt->len = ~0;
+		}
+
+		/* reap one completed descriptor from the RX pipe, if any */
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_rx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&rx_pkt->list_node);
+			sys_rx->len--;
+			rx_pkt->len = iov.size;
+
+			/* grow the free pool when empty; allocation retries
+			 * forever with a sleep between attempts */
+retry_alloc_tx:
+			if (list_empty(&sys_tx->free_desc_list)) {
+				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
+						GFP_KERNEL);
+				if (!tmp_pkt) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+							__func__);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
+						GFP_KERNEL | GFP_DMA);
+				if (!tmp_pkt->buffer) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+							__func__);
+					kfree(tmp_pkt);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->dma_address = dma_map_single(NULL,
+						tmp_pkt->buffer,
+						IPA_RX_SKB_SIZE,
+						DMA_BIDIRECTIONAL);
+				/*
+				 * NOTE(review): a mapping failure is only
+				 * logged here and the descriptor is still
+				 * added to the free list with a bad
+				 * dma_address -- confirm this is intended.
+				 */
+				if (tmp_pkt->dma_address == 0 ||
+						tmp_pkt->dma_address == ~0) {
+					pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+						__func__,
+						(void *)tmp_pkt->dma_address,
+						tmp_pkt->buffer);
+				}
+
+				list_add_tail(&tmp_pkt->list_node,
+						&sys_tx->free_desc_list);
+				sys_tx->free_len++;
+				alloc_cnt[dir]++;
+
+				tmp_pkt->len = ~0;
+			}
+
+			/* take a free buffer to re-arm the RX pipe */
+			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&tx_pkt->list_node);
+			sys_tx->free_len--;
+
+retry_add_rx:
+			list_add_tail(&tx_pkt->list_node,
+					&sys_rx->head_desc_list);
+			ret = sps_transfer_one(sys_rx->pipe,
+					tx_pkt->dma_address,
+					IPA_RX_SKB_SIZE,
+					tx_pkt,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				list_del(&tx_pkt->list_node);
+				pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+						__func__, ret);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_rx;
+			}
+			sys_rx->len++;
+
+			/* forward the received buffer on the TX pipe */
+retry_add_tx:
+			list_add_tail(&rx_pkt->list_node,
+					&sys_tx->head_desc_list);
+			ret = sps_transfer_one(sys_tx->pipe,
+					rx_pkt->dma_address,
+					iov.size,
+					rx_pkt,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+						__func__, dir);
+				list_del(&rx_pkt->list_node);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_tx;
+			}
+			sys_tx->len++;
+		}
+
+		/* idle long enough: hand completions back to interrupts */
+		if (inactive_cycles >= polling_inactivity[dir]) {
+			ipa_switch_to_intr_mode(dir);
+			break;
+		}
+	}
+}
+
+/*
+ * SPS event callback for the IPA->memory (uplink RX) pipe: on EOT, switch
+ * the UL pipe pair to polling mode and kick the UL worker.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_UL);
+		queue_work(ipa_ul_workqueue, &ul_work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/*
+ * Connect the IPA-side SPS pipe for one bridge direction:
+ *  - IPA_DL: system memory -> IPA (A2 tethered producer EP), with the EP
+ *    put in DMA mode towards the USB consumer;
+ *  - otherwise (uplink): IPA -> system memory, with an EOT callback and the
+ *    RX buffer pool pre-filled.
+ *
+ * Fix: the uplink branch logged "tx register event error" on a failure of
+ * its RX event registration (copy-paste from the DL branch).
+ *
+ * Returns 0 on success or a negative errno; partial setup is undone.
+ */
+static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct ipa_ep_cfg_mode mode;
+	dma_addr_t dma_addr;
+	int ipa_ep_idx;
+	int ret;
+	int i;
+
+	if (dir == IPA_DL) {
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_PROD);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_TO_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = ipa_ctx->bam_handle;
+		sys->connection.dest_pipe_index = ipa_ep_idx;
+		sys->connection.mode = SPS_MODE_DEST;
+		/* TX side runs in poll mode; no completion interrupts */
+		sys->connection.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		ipa_ctx->ep[ipa_ep_idx].valid = 1;
+
+		/* pass frames straight through to the USB consumer */
+		mode.mode = IPA_DMA;
+		mode.dst = IPA_CLIENT_USB_CONS;
+		ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
+		if (ret < 0) {
+			IPAERR("DMA mode set error %d\n", ret);
+			goto tx_mode_set_failed;
+		}
+
+		return 0;
+
+tx_mode_set_failed:
+		sps_disconnect(sys->pipe);
+tx_connect_failed:
+		dma_free_coherent(NULL, sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else {
+
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_CONS);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_FROM_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = ipa_ctx->bam_handle;
+		/* NOTE(review): hard-coded IPA pipe index -- confirm it
+		 * matches the EP mapping for this client */
+		sys->connection.src_pipe_index = 7;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = ipa_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("rx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		/* pre-fill the RX descriptor pool */
+		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+/*
+ * SPS event callback for the A2->memory (downlink RX) pipe: on EOT, switch
+ * the DL pipe pair to polling mode and kick the DL worker.
+ */
+static void bam_mux_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_DL);
+		queue_work(ipa_dl_workqueue, &dl_work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/*
+ * Connect the A2-side SPS pipe for one bridge direction:
+ *  - IPA_UL: system memory -> A2, poll mode;
+ *  - otherwise (IPA_DL): A2 -> system memory, with an EOT callback and the
+ *    RX buffer pool pre-filled.
+ *
+ * Fixes: the else branch was mislabeled "dir == IPA_UL" (it handles the
+ * downlink), and its RX event-registration failure logged "tx register
+ * event error".
+ *
+ * Returns 0 on success or a negative errno; partial setup is undone.
+ */
+static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct a2_mux_pipe_connection pipe_conn = { 0, };
+	dma_addr_t dma_addr;
+	u32 a2_handle;
+	int ret;
+	int i;
+
+	if (dir == IPA_UL) {
+		ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
+			goto tx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_TO_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = a2_handle;
+		sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
+		sys->connection.mode = SPS_MODE_DEST;
+		/* TX side runs in poll mode; no completion interrupts */
+		sys->connection.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		return 0;
+
+tx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else { /* dir == IPA_DL */
+
+		ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
+			goto rx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_FROM_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = a2_handle;
+		sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = bam_mux_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("rx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		/* pre-fill the RX descriptor pool */
+		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+/**
+ * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
+ * workqueues
+ *
+ * Return codes: 0: success, -ENOMEM: failure
+ */
+int ipa_bridge_init(void)
+{
+	ipa_ul_workqueue = alloc_workqueue("ipa_ul",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_ul_workqueue) {
+		IPAERR("ipa ul wq alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ipa_dl_workqueue = alloc_workqueue("ipa_dl",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_dl_workqueue) {
+		IPAERR("ipa dl wq alloc failed\n");
+		destroy_workqueue(ipa_ul_workqueue);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_bridge_setup() - setup tethered SW bridge in specified direction
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Votes the IPA clocks on for the first active client, then connects the
+ * A2-side and IPA-side SPS pipes for @dir.
+ *
+ * Return codes:
+ * 0: success
+ * various negative error codes on errors
+ */
+int ipa_bridge_setup(enum ipa_bridge_dir dir)
+{
+	int ret;
+
+	/* first active client ungates the IPA clocks */
+	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+		ipa_enable_clks();
+
+	if (setup_bridge_to_a2(dir)) {
+		IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
+		ret = -EINVAL;
+		goto bail_a2;
+	}
+
+	if (setup_bridge_to_ipa(dir)) {
+		IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
+		ret = -EINVAL;
+		goto bail_ipa;
+	}
+
+	return 0;
+
+bail_ipa:
+	/*
+	 * NOTE(review): only sps_disconnect() is done here -- the endpoint
+	 * and descriptor FIFO allocated by setup_bridge_to_a2() appear to
+	 * be left behind; confirm whether a fuller teardown is needed.
+	 */
+	if (dir == IPA_UL)
+		sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+	else
+		sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+bail_a2:
+	/* drop our clock vote; last client gates the clocks */
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+	return ret;
+}
+
+/**
+ * ipa_bridge_teardown() - teardown the tethered bridge in the specified dir
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Disconnects both SPS pipes of the direction's pipe pair, then gates the
+ * IPA clocks if this was the last active client.
+ *
+ * Return codes:
+ * 0: always
+ */
+int ipa_bridge_teardown(enum ipa_bridge_dir dir)
+{
+	if (dir == IPA_UL) {
+		sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+		sps_disconnect(bridge[IPA_UL_FROM_IPA].pipe);
+	} else {
+		sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+		sps_disconnect(bridge[IPA_DL_TO_IPA].pipe);
+	}
+
+	/* last active client gates the IPA clocks */
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return 0;
+}
+
+/**
+ * ipa_bridge_cleanup() - de-initialize the tethered bridge
+ *
+ * Destroys the DL and UL workqueues created by ipa_bridge_init().
+ *
+ * Return codes:
+ * None
+ */
+void ipa_bridge_cleanup(void)
+{
+	destroy_workqueue(ipa_dl_workqueue);
+	destroy_workqueue(ipa_ul_workqueue);
+}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
new file mode 100644
index 0000000..823b17d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+ struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+ int result = -EFAULT;
+
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+
+ if (ep->ep_hdl == NULL) {
+ IPAERR("SPS EP alloc failed EP.\n");
+ return -EFAULT;
+ }
+
+ result = sps_get_config(ep->ep_hdl,
+ &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ return -EFAULT;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination =
+ in->client_bam_hdl;
+ ep->connect.source = ipa_ctx->bam_handle;
+ ep->connect.dest_pipe_index =
+ in->client_ep_idx;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = in->client_bam_hdl;
+ ep->connect.destination = ipa_ctx->bam_handle;
+ ep->connect.src_pipe_index = in->client_ep_idx;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+ struct sps_mem_buffer *mem_buff_ptr,
+ bool *fifo_in_pipe_mem_ptr,
+ u32 *fifo_pipe_mem_ofst_ptr,
+ u32 fifo_size, int ipa_ep_idx)
+{
+ dma_addr_t dma_addr;
+ u32 ofst;
+ int result = -EFAULT;
+
+ mem_buff_ptr->size = fifo_size;
+ if (in->pipe_mem_preferred) {
+ if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+ IPAERR("FIFO pipe mem alloc fail ep %u\n",
+ ipa_ep_idx);
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL,
+ mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ } else {
+ memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+ result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+ fifo_size, 1);
+ WARN_ON(result);
+ *fifo_in_pipe_mem_ptr = 1;
+ dma_addr = mem_buff_ptr->phys_base;
+ *fifo_pipe_mem_ofst_ptr = ofst;
+ }
+ } else {
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL, mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ }
+ mem_buff_ptr->phys_base = dma_addr;
+ if (mem_buff_ptr->base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. these peripherals are A2, USB and HSIC. this api
+ * expects caller to take responsibility to add any needed headers, routing
+ * and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int ipa_ep_idx_dst;
+ int result = -EFAULT;
+ struct ipa_ep_context *ep;
+
+ if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+ ipa_enable_clks();
+
+ if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+ in->client >= IPA_CLIENT_MAX ||
+ in->ipa_ep_cfg.mode.dst >= IPA_CLIENT_MAX ||
+ in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ result = -EINVAL;
+ goto fail;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_PROD(in->client) &&
+ (in->ipa_ep_cfg.mode.mode == IPA_DMA)) {
+ ipa_ep_idx_dst = ipa_get_ep_mapping(ipa_ctx->mode,
+ in->ipa_ep_cfg.mode.dst);
+ if ((ipa_ep_idx_dst == -1) ||
+ (ipa_ctx->ep[ipa_ep_idx_dst].valid)) {
+ IPADBG("dst EP for IPA input pipe doesn't yet exist\n");
+ }
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+ ep->valid = 1;
+ ep->client = in->client;
+ ep->notify = in->notify;
+ ep->priv = in->priv;
+
+ if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to configure SPS.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (in->desc.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+ &ep->desc_fifo_in_pipe_mem,
+ &ep->desc_fifo_pipe_mem_ofst,
+ in->desc_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DESC FIFO.\n");
+ goto desc_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DESC FIFO\n");
+ ep->connect.desc = in->desc;
+ ep->desc_fifo_client_allocated = 1;
+ }
+ IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
+ ep->connect.desc.size);
+
+ if (in->data.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+ &ep->data_fifo_in_pipe_mem,
+ &ep->data_fifo_pipe_mem_ofst,
+ in->data_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DATA FIFO.\n");
+ goto data_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DATA FIFO\n");
+ ep->connect.data = in->data;
+ ep->data_fifo_client_allocated = 1;
+ }
+ IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
+ ep->connect.data.size);
+
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+ ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+
+ result = sps_connect(ep->ep_hdl, &ep->connect);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto sps_connect_fail;
+ }
+
+ sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+ sps->ipa_ep_idx = ipa_ep_idx;
+ *clnt_hdl = ipa_ep_idx;
+ memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+ memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+ return 0;
+
+sps_connect_fail:
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+
+data_mem_alloc_fail:
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+
+desc_mem_alloc_fail:
+ sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail:
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_connect);
+
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. this api expects caller to take responsibility to
+ * free any needed headers, routing and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ result = sps_disconnect(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS disconnect failed.\n");
+ return -EPERM;
+ }
+
+ if (!ep->desc_fifo_client_allocated &&
+ ep->connect.desc.base) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+
+ if (!ep->data_fifo_client_allocated &&
+ ep->connect.data.base) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+
+ result = sps_free_endpoint(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS de-alloc EP failed.\n");
+ return -EPERM;
+ }
+
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
new file mode 100644
index 0000000..43b0178d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -0,0 +1,507 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "ipa_i.h"
+
+
+#define IPA_MAX_MSG_LEN 1024
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static s8 ep_reg_idx;
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_FILTER=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n"
+ "IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST),
+ ipa_read_reg(ipa_ctx->mmio,
+ IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option >= IPA_NUM_PIPES) {
+ IPAERR("bad pipe specified %u\n", option);
+ return count;
+ }
+
+ ep_reg_idx = option;
+
+ return count;
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int start_idx;
+ int end_idx;
+ int size = 0;
+ int ret;
+ loff_t pos;
+
+ /* negative ep_reg_idx means all registers */
+ if (ep_reg_idx < 0) {
+ start_idx = 0;
+ end_idx = IPA_NUM_PIPES;
+ } else {
+ start_idx = ep_reg_idx;
+ end_idx = start_idx + 1;
+ }
+ pos = *ppos;
+ for (i = start_idx; i < end_idx; i++) {
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_n_OFST(i)));
+ *ppos = pos;
+ ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+ nbytes);
+ if (ret < 0)
+ return ret;
+
+ size += ret;
+ ubuf += nbytes;
+ count -= nbytes;
+ }
+
+ *ppos = pos + size;
+ return size;
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_hdr_entry *entry;
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "name:%s len=%d ref=%d partial=%d lcl=%d ofst=%u ",
+ entry->name,
+ entry->hdr_len, entry->ref_cnt,
+ entry->is_partial,
+ ipa_ctx->hdr_tbl_lcl,
+ entry->offset_entry->offset >> 2);
+ for (i = 0; i < entry->hdr_len; i++) {
+ scnprintf(dbg_buff + cnt + nbytes + i * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - i * 2,
+ "%02x", entry->hdr[i]);
+ }
+ scnprintf(dbg_buff + cnt + nbytes + entry->hdr_len * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - entry->hdr_len * 2,
+ "\n");
+ cnt += nbytes + entry->hdr_len * 2 + 1;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static int ipa_attrib_dump(char *buff, size_t sz,
+ struct ipa_rule_attrib *attrib, enum ipa_ip_type ip)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ uint32_t addr[4];
+ uint32_t mask[4];
+ int i;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tos:%d ",
+ attrib->u.v4.tos);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "protocol:%d ",
+ attrib->u.v4.protocol);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.src_addr);
+ mask[0] = htonl(attrib->u.v4.src_addr_mask);
+ nbytes = scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI4 src_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.src_addr[i]);
+ mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI6 src_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.dst_addr);
+ mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+ mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "src_port_range:%u %u ",
+ attrib->src_port_lo,
+ attrib->src_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "dst_port_range:%u %u ",
+ attrib->dst_port_lo,
+ attrib->dst_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "type:%d ",
+ attrib->type);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "code:%d ",
+ attrib->code);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "spi:%x ",
+ attrib->spi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "src_port:%u ",
+ attrib->src_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "dst_port:%u ",
+ attrib->dst_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tc:%d ",
+ attrib->u.v6.tc);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "flow_label:%x ",
+ attrib->u.v6.flow_label);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "next_hdr:%d ",
+ attrib->u.v6.next_hdr);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "metadata:%x metadata_mask:%x",
+ attrib->meta_data, attrib->meta_data_mask);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "frg ");
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(buff + cnt, sz - cnt, "\n");
+ cnt += nbytes;
+
+ return cnt;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 hdr_ofst;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (entry->hdr)
+ hdr_ofst = entry->hdr->offset_entry->offset;
+ else
+ hdr_ofst = 0;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "tbl_idx:%d tbl_name:%s tbl_ref:%u rule_idx:%d dst:%d ep:%d S:%u hdr_ofst[words]:%u attrib_mask:%08x ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt, i, entry->rule.dst,
+ ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst),
+ !ipa_ctx->hdr_tbl_lcl,
+ hdr_ofst >> 2,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i;
+ int j;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ struct ipa_rt_tbl *rt_tbl;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib, ip);
+ i++;
+ }
+
+ for (j = 0; j < IPA_NUM_PIPES; j++) {
+ tbl = &ipa_ctx->flt_tbl[j][ip];
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ j, i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt +=
+ ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+ .read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+ .read = ipa_read_ep_reg,
+ .write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_hdr_ops = {
+ .read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+ .read = ipa_read_rt,
+ .open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_flt_ops = {
+ .read = ipa_read_flt,
+ .open = ipa_open_dbg,
+};
+
+void ipa_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+ &ipa_gen_reg_ops);
+ if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+ IPAERR("fail to create file for debug_fs gen_reg\n");
+ goto fail;
+ }
+
+ dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+ &ipa_ep_reg_ops);
+ if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+ IPAERR("fail to create file for debug_fs ep_reg\n");
+ goto fail;
+ }
+
+ dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+ &ipa_hdr_ops);
+ if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+ IPAERR("fail to create file for debug_fs hdr\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_rt_ops);
+ if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+ IPAERR("fail to create file for debug_fs ip4 rt\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_rt_ops);
+ if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+ IPAERR("fail to create file for debug_fs ip6 rt\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_flt_ops);
+ if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+ IPAERR("fail to create file for debug_fs ip4 flt\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_flt_ops);
+ if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+ IPAERR("fail to create file for debug_fs ip6 flt\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+ if (IS_ERR(dent)) {
+ IPAERR("ipa_debugfs_remove: folder was not created.\n");
+ return;
+ }
+ debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+#endif
+
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
new file mode 100644
index 0000000..c677a6e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+/**
+ * ipa_write_done - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work: work_struct used by the work queue
+ */
+void ipa_write_done(struct work_struct *work)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+ unsigned long irq_flags;
+ struct ipa_mem_buffer mult = { 0 };
+ int i;
+ u16 cnt;
+
+ tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+ cnt = tx_pkt->cnt;
+ IPADBG("cnt=%d\n", cnt);
+
+ if (unlikely(cnt == 0))
+ WARN_ON(1);
+
+ if (cnt > 1 && cnt != 0xFFFF)
+ mult = tx_pkt->mult;
+
+ for (i = 0; i < cnt; i++) {
+ if (unlikely(tx_pkt == NULL))
+ WARN_ON(1);
+ spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
+ tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper,
+ link);
+ if (unlikely(tx_pkt != tx_pkt_expected)) {
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
+ irq_flags);
+ WARN_ON(1);
+ }
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ tx_pkt->sys->len--;
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ if (tx_pkt->callback)
+ tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+
+ if (mult.phys_base)
+ dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ unsigned long irq_flags;
+ int result;
+ u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+ dma_addr_t dma_address;
+ u16 len;
+
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto fail_mem_alloc;
+ }
+
+ WARN_ON(desc->len > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the packet does not
+ * cross a 1KB boundary
+ */
+ tx_pkt->bounce = dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+ GFP_KERNEL, &dma_address);
+ if (!tx_pkt->bounce) {
+ dma_address = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary
+ ((u32)dma_address, (u32)dma_address + desc->len - 1,
+ 1024));
+ memcpy(tx_pkt->bounce, desc->pyld, desc->len);
+ }
+
+ if (!dma_address) {
+ IPAERR("failed to DMA wrap\n");
+ goto fail_dma_map;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc->type;
+ tx_pkt->cnt = 1; /* only 1 desc in this "set" */
+
+ tx_pkt->mem.phys_base = dma_address;
+ tx_pkt->mem.base = desc->pyld;
+ tx_pkt->mem.size = desc->len;
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc->callback;
+ tx_pkt->user1 = desc->user1;
+ tx_pkt->user2 = desc->user2;
+
+ /*
+ * Special treatment for immediate commands, where the structure of the
+ * descriptor is different
+ */
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ sps_flags |= SPS_IOVEC_FLAG_IMME;
+ len = desc->opcode;
+ } else {
+ len = desc->len;
+ }
+
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+ }
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+ sps_flags);
+ if (result) {
+ IPAERR("sps_transfer_one failed rc=%d\n", result);
+ goto fail_sps_send;
+ }
+
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ return 0;
+
+fail_sps_send:
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ dma_address);
+fail_dma_map:
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+ return -EFAULT;
+}
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct sps_transfer transfer = { 0 };
+ struct sps_iovec *iovec;
+ unsigned long irq_flags;
+ dma_addr_t dma_addr;
+ int i;
+ int j;
+ int result;
+ int fail_dma_wrap;
+ uint size = num_desc * sizeof(struct sps_iovec);
+
+ for (i = 0; i < num_desc; i++) {
+ fail_dma_wrap = 0;
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+ GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto failure;
+ }
+ /*
+ * first desc of set is "special" as it holds the count and
+ * other info
+ */
+ if (i == 0) {
+ transfer.user = tx_pkt;
+ transfer.iovec =
+ dma_alloc_coherent(NULL, size, &dma_addr, 0);
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ if (!transfer.iovec) {
+ IPAERR("fail alloc DMA mem for sps xfr buff\n");
+ goto failure;
+ }
+
+ tx_pkt->mult.phys_base = dma_addr;
+ tx_pkt->mult.base = transfer.iovec;
+ tx_pkt->mult.size = size;
+ tx_pkt->cnt = num_desc;
+ }
+
+ iovec = &transfer.iovec[i];
+ iovec->flags = 0;
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc[i].type;
+
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ WARN_ON(tx_pkt->mem.size > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the
+ * packet does not cross a 1KB boundary
+ */
+ tx_pkt->bounce =
+ dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool, GFP_KERNEL,
+ &tx_pkt->mem.phys_base);
+ if (!tx_pkt->bounce) {
+ tx_pkt->mem.phys_base = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary(
+ (u32)tx_pkt->mem.phys_base,
+ (u32)tx_pkt->mem.phys_base +
+ tx_pkt->mem.size - 1, 1024));
+ memcpy(tx_pkt->bounce, tx_pkt->mem.base,
+ tx_pkt->mem.size);
+ }
+
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("failed to alloc tx wrapper\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc[i].callback;
+ tx_pkt->user1 = desc[i].user1;
+ tx_pkt->user2 = desc[i].user2;
+
+ iovec->addr = tx_pkt->mem.phys_base;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ /*
+ * Special treatment for immediate commands, where the structure
+ * of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ iovec->size = desc[i].opcode;
+ iovec->flags |= SPS_IOVEC_FLAG_IMME;
+ } else {
+ iovec->size = desc[i].len;
+ }
+
+ if (i == (num_desc - 1)) {
+ iovec->flags |= (SPS_IOVEC_FLAG_EOT |
+ SPS_IOVEC_FLAG_INT);
+ /* "mark" the last desc */
+ tx_pkt->cnt = 0xFFFF;
+ }
+ }
+
+ result = sps_transfer(sys->ep->ep_hdl, &transfer);
+ if (result) {
+ IPAERR("sps_transfer failed rc=%d\n", result);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+ tx_pkt = transfer.user;
+ for (j = 0; j < i; j++) {
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+ if (i < num_desc)
+ /* last desc failed */
+ if (fail_dma_wrap)
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ if (transfer.iovec_phys)
+ dma_free_coherent(NULL, size, transfer.iovec,
+ transfer.iovec_phys);
+
+ return -EFAULT;
+}
+
+/**
+ * ipa_cmd_ack - callback function which will be called by SPS driver after an
+ * immediate command is complete.
+ * @user1: pointer to the descriptor of the transfer
+ * @user2:
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_cmd_ack(void *user1, void *user2)
+{
+ struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+ if (!desc)
+ WARN_ON(1);
+ IPADBG("got ack for cmd=%d\n", desc->opcode);
+ complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc: number of descriptors within the descr struct
+ * @descr: descriptor structure
+ *
+ * Function will block till command gets ACK from IPA HW, caller needs
+ * to free any resources it allocated after function returns
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+	struct ipa_desc *desc;
+
+	/* guard the descr[num_desc - 1] access below against underflow */
+	if (num_desc == 0 || descr == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	/* completion is always tracked on the last descriptor of the set */
+	desc = &descr[num_desc - 1];
+	init_completion(&desc->xfer_done);
+
+	/* client should not set these */
+	if (desc->callback || desc->user1)
+		WARN_ON(1);
+
+	desc->callback = ipa_cmd_ack;
+	desc->user1 = desc;
+
+	if (num_desc == 1) {
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], desc)) {
+			IPAERR("fail to send immediate command\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			return -EFAULT;
+		}
+	}
+
+	/* released by ipa_cmd_ack() when the HW acknowledges the command */
+	wait_for_completion(&desc->xfer_done);
+
+	return 0;
+}
+
+/**
+ * ipa_tx_notify() - Callback function which will be called by the SPS driver
+ * after a Tx operation is complete. Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ *
+ * On EOT, the Tx packet wrapper that was attached as the transfer's user
+ * cookie is handed to the Tx workqueue so completion processing happens
+ * outside IRQ context. Any other event id is unexpected and only logged.
+ */
+static void ipa_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		/* user cookie was set to the tx wrapper at submit time */
+		tx_pkt = notify->data.transfer.user;
+		queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("recieved unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is read from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ * - Disconnect the packet from the system pipe linked list
+ * - Unmap the packets skb, make it non DMAable
+ * - Free the packet from the cache
+ * - Prepare a proper skb
+ * - Call the endpoints notify function, passing the skb in the parameters
+ * - Replenish the rx cache
+ */
+void ipa_handle_rx_core(void)
+{
+	struct ipa_a5_mux_hdr *mux_hdr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct sk_buff *rx_skb;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	u16 pull_len;
+	u16 padding;
+	int ret;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+	struct ipa_ep_context *ep;
+
+	do {
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		/* Break the loop when there are no more packets to receive */
+		if (iov.addr == 0)
+			break;
+
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		if (list_empty(&sys->head_desc_list))
+			WARN_ON(1);
+		rx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_rx_pkt_wrapper, link);
+		if (!rx_pkt)
+			WARN_ON(1);
+		rx_pkt->len = iov.size;
+		sys->len--;
+		list_del(&rx_pkt->link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		IPADBG("--curr_cnt=%d\n", sys->len);
+
+		rx_skb = rx_pkt->skb;
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+
+		/*
+		 * make it look like a real skb, "data" was already set at
+		 * alloc time
+		 */
+		rx_skb->tail = rx_skb->data + rx_pkt->len;
+		rx_skb->len = rx_pkt->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+
+		/*
+		 * free the wrapper only after the final read of rx_pkt->len
+		 * above; it was previously freed before those reads, which
+		 * was a use-after-free
+		 */
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+		IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+			rx_skb->len, ntohs(mux_hdr->interface_id),
+			mux_hdr->src_pipe_index,
+			mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+		IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+		/* drop packets from unknown or unconfigured source pipes */
+		if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
+			IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
+			       mux_hdr->src_pipe_index,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+			dev_kfree_skb_any(rx_skb);
+			ipa_replenish_rx_cache();
+			continue;
+		}
+
+		ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
+		pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+		/*
+		 * IP packet starts on word boundary
+		 * remove the MUX header and any padding and pass the frame to
+		 * the client which registered a rx callback on the "src pipe"
+		 */
+		padding = ep->cfg.hdr.hdr_len & 0x3;
+		if (padding)
+			pull_len += 4 - padding;
+
+		IPADBG("pulling %d bytes from skb\n", pull_len);
+		skb_pull(rx_skb, pull_len);
+		/* client's notify callback now owns the skb */
+		ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+		ipa_replenish_rx_cache();
+	} while (1);
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ *
+ * Re-registers the EOT event and re-enables EOT in the SPS connection
+ * options, then drains any packets that arrived while the switch was in
+ * progress before clearing the polling-state flag. The SPS call order
+ * (get_config -> register_event -> set_config) is intentional.
+ */
+static void ipa_rx_switch_to_intr_mode(void)
+{
+	int ret;
+	struct ipa_sys_context *sys;
+
+	IPADBG("Enter");
+	if (!ipa_ctx->curr_polling_state) {
+		IPAERR("already in intr mode\n");
+		return;
+	}
+
+	sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		return;
+	}
+	sys->event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		return;
+	}
+	sys->ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		return;
+	}
+	/* drain packets that raced with the mode switch */
+	ipa_handle_rx_core();
+	ipa_ctx->curr_polling_state = 0;
+}
+
+/**
+ * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
+ *
+ * Reads the current SPS connection settings of the LAN/WAN-in endpoint,
+ * replaces the event options with SPS_O_POLL, writes the settings back,
+ * and on success flags the driver as being in polling state.
+ */
+static void ipa_rx_switch_to_poll_mode(void)
+{
+	struct ipa_ep_context *ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+	int rc;
+
+	IPADBG("Enter");
+
+	rc = sps_get_config(ep->ep_hdl, &ep->connect);
+	if (rc) {
+		IPAERR("sps_get_config() failed %d\n", rc);
+		return;
+	}
+
+	ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+
+	rc = sps_set_config(ep->ep_hdl, &ep->connect);
+	if (rc) {
+		IPAERR("sps_set_config() failed %d\n", rc);
+		return;
+	}
+
+	ipa_ctx->curr_polling_state = 1;
+}
+
+/**
+ * ipa_rx_notify() - Callback function which is called by the SPS driver when a
+ * a packet is received
+ * @notify: SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deffered using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This comes to prevent the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		/*
+		 * only act on the first EOT while in interrupt mode; once
+		 * polling, ipa_handle_rx() drains the pipe until empty
+		 */
+		if (!ipa_ctx->curr_polling_state) {
+			ipa_rx_switch_to_poll_mode();
+			/* user cookie was set to the rx wrapper at submit */
+			rx_pkt = notify->data.transfer.user;
+			queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+		}
+		break;
+	default:
+		IPAERR("recieved unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and config EP
+ * @clnt_hdl: [out] client handle
+ *
+ * Allocates an SPS endpoint, configures it as source (CONS clients) or
+ * destination (PROD clients), allocates its descriptor FIFO, connects it,
+ * and registers the rx/tx event callback when not in polling mode.
+ * Error paths unwind in reverse order of acquisition via the goto chain.
+ *
+ * NOTE(review): on success *clnt_hdl is never written here even though it is
+ * documented as an output — TODO confirm against callers.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int sys_idx = -1;
+	int result = -EFAULT;
+	dma_addr_t dma_addr;
+
+	if (sys_in == NULL || clnt_hdl == NULL ||
+	    sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		result = -EINVAL;
+		goto fail_bad_param;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_bad_param;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
+		IPAERR("EP already allocated.\n");
+		goto fail_bad_param;
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+	ipa_ctx->ep[ipa_ep_idx].valid = 1;
+	ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
+
+	if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+		IPAERR("fail to configure EP.\n");
+		goto fail_sps_api;
+	}
+
+	/* Default Config */
+	ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
+
+	if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		goto fail_sps_api;
+	}
+
+	result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		goto fail_mem_alloc;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		/* CONS client: data flows from IPA BAM to system memory */
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
+	} else {
+		/* PROD client: data flows from system memory to IPA BAM */
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |=
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
+	   dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			   &dma_addr, 0);
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
+	if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
+		IPAERR("fail to get DMA desc memory.\n");
+		goto fail_mem_alloc;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+	result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto fail_sps_connect;
+	}
+
+	/*
+	 * map the EP index to its sys context slot; EPs 1-3 map 1:1,
+	 * EP 15 is the WLAN AMPDU out pipe
+	 */
+	switch (ipa_ep_idx) {
+	case 1:
+		/* fall through */
+	case 2:
+		/* fall through */
+	case 3:
+		sys_idx = ipa_ep_idx;
+		break;
+	case 15:
+		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
+		break;
+	default:
+		IPAERR("Invalid EP index.\n");
+		result = -EFAULT;
+		goto fail_register_event;
+	}
+
+	if (!ipa_ctx->polling_mode) {
+		/* register EOT callback: rx for CONS clients, tx for PROD */
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+				sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					  &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("rx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		} else {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+				sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					  &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("tx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		}
+	}
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
+fail_mem_alloc:
+	sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_api:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_bad_param:
+	return result;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Disconnects the SPS pipe, releases the descriptor FIFO, frees the SPS
+ * endpoint and clears the EP context (reverse of ipa_setup_sys_pipe()).
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	sps_disconnect(ep->ep_hdl);
+	dma_free_coherent(NULL, ep->connect.desc.size,
+			  ep->connect.desc.base,
+			  ep->connect.desc.phys_base);
+	sps_free_endpoint(ep->ep_hdl);
+	memset(ep, 0, sizeof(struct ipa_ep_context));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+/**
+ * ipa_tx_comp() - Callback function which will call the user supplied callback
+ * function to release the skb, or release it on its own if no callback
+ * function was supplied.
+ * @user1: the transmitted skb
+ * @user2: index of the destination endpoint (cast from an integer)
+ */
+static void ipa_tx_comp(void *user1, void *user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	u32 ep_idx = (u32)user2;
+	struct ipa_ep_context *ep = &ipa_ctx->ep[ep_idx];
+
+	IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+	if (ep->notify == NULL) {
+		/* no client callback registered; drop the skb ourselves */
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* hand the skb back to the client for release */
+	ep->notify(ep->priv, IPA_WRITE_DONE, (unsigned long)skb);
+}
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler, this is used for both SW data-path which by-passes most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client calback if one was supplied. That
+ * callback should free the skb. If no callback supplied, IPA driver will free
+ * the skb internally
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa_desc desc[2];
+	int ipa_ep_idx;
+	struct ipa_ip_packet_init *cmd;
+
+	memset(&desc, 0, 2 * sizeof(struct ipa_desc));
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
+	if (ipa_ep_idx == -1) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_gen;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_gen;
+	}
+
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		/*
+		 * SW path: prepend an IP_PACKET_INIT immediate command that
+		 * steers the following data descriptor to the dest pipe
+		 */
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_KERNEL);
+		if (!cmd) {
+			IPAERR("failed to alloc immediate command object\n");
+			goto fail_mem_alloc;
+		}
+
+		cmd->destination_pipe_index = ipa_ep_idx;
+		if (meta && meta->mbim_stream_id_valid)
+			cmd->metadata = meta->mbim_stream_id;
+		desc[0].opcode = IPA_IP_PACKET_INIT;
+		desc[0].pyld = cmd;
+		desc[0].len = sizeof(struct ipa_ip_packet_init);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		/* data descriptor follows the command in the same transfer */
+		desc[1].pyld = skb->data;
+		desc[1].len = skb->len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa_tx_comp;
+		desc[1].user1 = skb;
+		desc[1].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc)) {
+			IPAERR("fail to send immediate command\n");
+			goto fail_send;
+		}
+	} else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
+		/* HW path for WLAN AMPDU: single data descriptor, no cmd */
+		desc[0].pyld = skb->data;
+		desc[0].len = skb->len;
+		desc[0].type = IPA_DATA_DESC_SKB;
+		desc[0].callback = ipa_tx_comp;
+		desc[0].user1 = skb;
+		desc[0].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
+					&desc[0])) {
+			IPAERR("fail to send skb\n");
+			goto fail_gen;
+		}
+	} else {
+		IPAERR("%d PROD is not supported.\n", dst);
+		goto fail_gen;
+	}
+
+	return 0;
+
+fail_send:
+	kfree(cmd);
+fail_mem_alloc:
+fail_gen:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets has been
+ * received, the driver switches back to interrupt mode.
+ */
+void ipa_handle_rx(struct work_struct *work)
+{
+	/* drain the pipe, then re-arm the EOT interrupt */
+	ipa_handle_rx_core();
+	ipa_rx_switch_to_intr_mode();
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ * - Allocate a buffer in the cache
+ * - Initialized the packets link
+ * - Initialize the packets work struct
+ * - Allocate the packets socket buffer (skb)
+ * - Fill the packets skb with data
+ * - Make the packet DMAable
+ * - Add the packet to the system pipe linked list
+ * - Initiate a SPS transfer so that SPS driver will use this packet later.
+ */
+void ipa_replenish_rx_cache(void)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	rx_len_cached = sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	/* true RX data path is not currently exercised so drop the ceil */
+	while (rx_len_cached < (IPA_RX_POOL_CEIL >> 3)) {
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   GFP_KERNEL);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			return;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+
+		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+		if (rx_pkt->skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		/* reserve the full buffer so HW can write into it via DMA */
+		ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
+		rx_pkt->dma_address = dma_map_single(NULL, ptr,
+						     IPA_RX_SKB_SIZE,
+						     DMA_FROM_DEVICE);
+		/* 0 and ~0 treated as mapping failure sentinels here */
+		if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->dma_address, ptr);
+			goto fail_dma_mapping;
+		}
+
+		/* publish to the descriptor list before handing to SPS */
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
+				       IPA_RX_SKB_SIZE, rx_pkt,
+				       SPS_IOVEC_FLAG_INT);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+
+		IPADBG("++curr_cnt=%d\n", sys->len);
+	}
+
+	return;
+
+fail_sps_transfer:
+	/* unwind in reverse order of acquisition */
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_del(&rx_pkt->link);
+	--sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+			 DMA_FROM_DEVICE);
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+	return;
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ *
+ * Walks the LAN/WAN-in descriptor list and, for each outstanding rx packet,
+ * unlinks it, unmaps its DMA buffer, frees the skb and returns the wrapper
+ * to its cache. The whole walk runs under the sys spinlock.
+ */
+void ipa_cleanup_rx(void)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_rx_pkt_wrapper *r;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	/* _safe variant: entries are deleted while iterating */
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
new file mode 100644
index 0000000..81f3a80
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE (4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3)
+#define IPA_FLT_BIT_MASK (0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer, buf == NULL means
+ *	   caller wants to know the size of the rule as seen
+ *	   by HW so they did not pass a valid buffer, we will use a
+ *	   scratch buffer instead.
+ *	   With this scheme we are going to
+ *	   generate the rule twice, once to know size using scratch
+ *	   buffer and second to write the rule to the actual caller
+ *	   supplied buffer which is of required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_flt_entry *entry, u8 *buf)
+{
+	struct ipa_flt_rule_hw_hdr *hdr;
+	const struct ipa_flt_rule *rule =
+		(const struct ipa_flt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+	u8 *start;
+
+	/* size-probe pass: write into the local scratch buffer */
+	memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+	if (buf == NULL)
+		buf = tmp;
+
+	start = buf;
+	hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+	hdr->u.hdr.action = entry->rule.action;
+	hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+	hdr->u.hdr.rsvd = 0;
+	buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+	/* append the attribute matchers; advances buf past them */
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule %x\n", en_rule);
+
+	hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+	/* first pass records hw_len; second pass must reproduce it exactly */
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=%x calc=%x\n",
+		       entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] size of the header block of the table
+ *
+ * Returns: total size of the table in bytes on success (the code below
+ * returns -EPERM on rule-generation failure, not 0 on success as previously
+ * documented)
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 total_sz = 0;
+	u32 rule_set_sz;
+	int i;
+
+	*hdr_sz = 0;
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	rule_set_sz = 0;
+	/* size-probe pass (buf == NULL) fills entry->hw_len */
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to find HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+		rule_set_sz += entry->hw_len;
+	}
+
+	if (rule_set_sz) {
+		tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+		/* this rule-set uses a word in header block */
+		*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+		if (!tbl->in_sys) {
+			/* add the terminator */
+			total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+			/* round the body up to a word boundary */
+			total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	/* same accounting for each per-pipe rule-set */
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		rule_set_sz = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW FLT rule size\n");
+				return -EPERM;
+			}
+			IPADBG("pipe %d len %d\n", i, entry->hw_len);
+			rule_set_sz += entry->hw_len;
+		}
+
+		if (rule_set_sz) {
+			tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+			/* this rule-set uses a word in header block */
+			*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+			if (!tbl->in_sys) {
+				/* add the terminator */
+				total_sz += (rule_set_sz +
+						IPA_FLT_TABLE_WORD_SIZE);
+				total_sz = (total_sz +
+						IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+						~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			}
+		}
+	}
+
+	/* one extra header word for the top-level bitmap */
+	*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the filtering table
+ *
+ * Layout: a header block of one word per non-empty rule-set (plus one
+ * top-level bitmap word) followed by the rule bodies. Local tables are
+ * written inline at a word-aligned offset; system-resident tables get their
+ * own DMA buffer and the header word holds its physical address.
+ *
+ * NOTE(review): on proc_err after a system table was allocated, that
+ * flt_tbl_mem buffer appears not to be freed — TODO confirm leak.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 hdr_top = 0;
+	int i;
+	u32 hdr_sz;
+	u32 offset;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	struct ipa_mem_buffer flt_tbl_mem;
+	u8 *ftbl_membody;
+
+	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+	if (mem->size == 0) {
+		IPAERR("flt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the flt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* write a dummy header to move cursor */
+	hdr = ipa_write_32(hdr_top, hdr);
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+	if (!list_empty(&tbl->head_flt_rule_list)) {
+		/* bit 0 of the bitmap marks the global rule-set */
+		hdr_top |= IPA_FLT_BIT_MASK;
+		if (!tbl->in_sys) {
+			offset = body - base;
+			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+				IPAERR("offset is not word multiple %d\n",
+						offset);
+				goto proc_err;
+			}
+
+			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_FLT_BIT_MASK;
+			hdr = ipa_write_32(offset, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_FLT_TABLE_WORD_SIZE -
+						((u32)body &
+						IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the flt tbl */
+			flt_tbl_mem.size = tbl->sz;
+			flt_tbl_mem.base =
+			   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+					   &flt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!flt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						flt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(flt_tbl_mem.phys_base &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+			ftbl_membody = flt_tbl_mem.base;
+			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+			/* header word carries the system table's phys addr */
+			hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					WARN_ON(1);
+				}
+				ftbl_membody += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			ftbl_membody = ipa_write_32(0, ftbl_membody);
+			/* keep previous buffer until HW commit, then reap */
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = flt_tbl_mem;
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (!list_empty(&tbl->head_flt_rule_list)) {
+			/* pipe "i" is at bit "i+1" */
+			hdr_top |= (1 << (i + 1));
+			if (!tbl->in_sys) {
+				offset = body - base;
+				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+					IPAERR("ofst is not word multiple %d\n",
+							offset);
+					goto proc_err;
+				}
+				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+				/* rule is at an offset from base */
+				offset |= IPA_FLT_BIT_MASK;
+				hdr = ipa_write_32(offset, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+								body)) {
+						IPAERR("fail gen FLT rule\n");
+						goto proc_err;
+					}
+					body += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				body = ipa_write_32(0, body);
+				if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+					/* advance body to next word boundary */
+					body = body + (IPA_FLT_TABLE_WORD_SIZE -
+							((u32)body &
+							IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+			} else {
+				WARN_ON(tbl->sz == 0);
+				/* allocate memory for the flt tbl */
+				flt_tbl_mem.size = tbl->sz;
+				flt_tbl_mem.base =
+				   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+						   &flt_tbl_mem.phys_base,
+						   GFP_KERNEL);
+				if (!flt_tbl_mem.base) {
+					IPAERR("fail alloc DMA buff size %d\n",
+							flt_tbl_mem.size);
+					WARN_ON(1);
+					goto proc_err;
+				}
+
+				WARN_ON(flt_tbl_mem.phys_base &
+						IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+				ftbl_membody = flt_tbl_mem.base;
+				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+								ftbl_membody)) {
+						IPAERR("fail gen FLT rule\n");
+						WARN_ON(1);
+					}
+					ftbl_membody += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				ftbl_membody =
+					ipa_write_32(0, ftbl_membody);
+				if (tbl->curr_mem.phys_base) {
+					WARN_ON(tbl->prev_mem.phys_base);
+					tbl->prev_mem = tbl->curr_mem;
+				}
+				tbl->curr_mem = flt_tbl_mem;
+			}
+		}
+	}
+
+	/* now write the hdr_top */
+	ipa_write_32(hdr_top, base);
+
+	return 0;
+proc_err:
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+
+	return -EPERM;
+}
+
+/*
+ * __ipa_reap_sys_flt_tbls() - free system-resident filter table buffers that
+ * are no longer referenced by HW for the given IP family: always frees the
+ * "prev" buffer (superseded by the last commit), and frees the "curr" buffer
+ * only when its rule list has become empty.
+ *
+ * Caller must hold any needed locks to ensure integrity.
+ */
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	if (tbl->prev_mem.phys_base) {
+		IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+		dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
+				tbl->prev_mem.phys_base);
+		memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+	}
+
+	if (list_empty(&tbl->head_flt_rule_list)) {
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+			dma_free_coherent(NULL, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+		}
+	}
+
+	/* repeat the same reaping for each per-pipe table */
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+			dma_free_coherent(NULL, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem.phys_base) {
+				IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+						i, ip);
+				dma_free_coherent(NULL, tbl->curr_mem.size,
+						tbl->curr_mem.base,
+						tbl->curr_mem.phys_base);
+				memset(&tbl->curr_mem, 0,
+						sizeof(tbl->curr_mem));
+			}
+		}
+	}
+}
+
+/*
+ * __ipa_commit_flt() - generate the filter table for the given IP family and
+ * commit it to IPA HW with an IP_Vn_FILTER_INIT immediate command; on success
+ * reap superseded system tables and free the staging DMA buffer.
+ *
+ * Returns 0 on success, -EPERM on any failure.
+ *
+ * Caller must hold any needed locks to ensure integrity.
+ */
+static int __ipa_commit_flt(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_filter_init *v4;
+	struct ipa_ip_v6_filter_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	/* pick the RAM budget and command size for the address family */
+	if (ip == IPA_IP_v4) {
+		avail = IPA_RAM_V4_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v4_filter_init);
+	} else {
+		avail = IPA_RAM_V6_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v6_filter_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_flt_hw_tbl(ip, mem)) {
+		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	/* table must fit within the HW RAM region reserved for it */
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_filter_init *)cmd;
+		desc.opcode = IPA_IP_V4_FILTER_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_RAM_V4_FLT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_filter_init *)cmd;
+		desc.opcode = IPA_IP_V6_FILTER_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_RAM_V6_FLT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	/* blocks until the HW acks the command */
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	/* HW has consumed the table; staging buffer can go */
+	__ipa_reap_sys_flt_tbls(ip);
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+/*
+ * __ipa_add_flt_rule() - add one filter rule to the given SW table
+ * @tbl: table to add into (global or per-pipe)
+ * @ip: IP family (not used in the body; kept for caller symmetry)
+ * @rule: rule definition; rule->rt_tbl_hdl must name an existing RT table
+ * @add_rear: non-zero to append at the tail, zero to insert at the head
+ * @rule_hdl: [out] opaque handle for the new rule
+ *
+ * Returns 0 on success, -EPERM on failure.
+ * NOTE(review): handles in this driver are kernel pointers cast to u32;
+ * rt_tbl_hdl is validated against the RT handle tree and then cast back
+ * to a struct ipa_rt_tbl pointer.
+ */
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa_flt_entry *entry;
+	struct ipa_tree_node *node;
+
+	if (!rule->rt_tbl_hdl) {
+		IPAERR("flt rule does not point to valid RT tbl\n");
+		goto error;
+	}
+
+	if (ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rule->rt_tbl_hdl) == NULL) {
+		IPAERR("RT tbl not found\n");
+		goto error;
+	}
+
+	if (((struct ipa_rt_tbl *)rule->rt_tbl_hdl)->cookie != IPA_COOKIE) {
+		IPAERR("flt rule cookie is invalid\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto mem_alloc_fail;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->rule = *rule;
+	entry->cookie = IPA_COOKIE;
+	entry->rt_tbl = (struct ipa_rt_tbl *)rule->rt_tbl_hdl;
+	entry->tbl = tbl;
+	if (add_rear)
+		list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	tbl->rule_cnt++;
+	/* the referenced RT table must outlive this rule */
+	entry->rt_tbl->ref_cnt++;
+	*rule_hdl = (u32)entry;
+	IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	/* record the handle in the lookup tree; failure is only WARNed,
+	 * the rule itself stays in the table and 0 is returned */
+	node->hdl = *rule_hdl;
+	if (ipa_insert(&ipa_ctx->flt_rule_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+mem_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+
+	return -EPERM;
+}
+
+/*
+ * __ipa_del_flt_rule() - delete one filter rule identified by its handle
+ * @rule_hdl: handle previously returned by __ipa_add_flt_rule()
+ *
+ * Returns 0 on success, -EINVAL for a bad handle, -EPERM if the handle
+ * is not in the lookup tree.
+ * NOTE(review): the handle is dereferenced (cookie check) before it is
+ * validated against the tree, so a bogus handle may fault - confirm
+ * callers only pass handles obtained from this driver.
+ */
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa_flt_entry *entry = (struct ipa_flt_entry *)rule_hdl;
+	struct ipa_tree_node *node;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, rule_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+
+		return -EPERM;
+	}
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	/* release the reference taken on the RT table at add time */
+	entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/*
+ * __ipa_add_global_flt_rule() - add a rule to the global (non-endpoint)
+ * filter table of the given IP family; thin wrapper over
+ * __ipa_add_flt_rule().
+ */
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+		const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	IPADBG("add global flt rule ip=%d\n", ip);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/*
+ * __ipa_add_ep_flt_rule() - add a rule to the per-endpoint filter table
+ * @ip: IP family
+ * @ep: client/endpoint the rule applies to
+ * @rule: rule definition
+ * @add_rear: non-zero to append at the tail
+ * @rule_hdl: [out] handle of the new rule
+ *
+ * Validates the arguments, maps the client to a pipe index and delegates
+ * to __ipa_add_flt_rule(). Returns 0 on success, -EINVAL on bad
+ * arguments or an invalid/unmapped endpoint.
+ */
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (ip >= IPA_IP_MAX || rule == NULL || rule_hdl == NULL ||
+	    ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
+	if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
+	    ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of filtering rules to add; per-rule status and the
+ * allocated handle are written back into each entry
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+	    rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	/* per-rule failures are reported via status, not by aborting */
+	for (i = 0; i < rules->num_rules; i++) {
+		if (rules->global)
+			result = __ipa_add_global_flt_rule(rules->ip,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (__ipa_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of filtering rule handles to delete; per-handle
+ * status is written back into each entry
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Fixes vs. original: the commit-failure path unlocked ipa_ctx->lock and
+ * then jumped to the bail label which unlocked it again (double unlock);
+ * the per-handle error message wrongly said "rt rule".
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	/* per-handle failures are reported via status, not by aborting */
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (__ipa_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (__ipa_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of filtering tables
+ *
+ * Frees every rule in the global table and in each per-pipe table, and
+ * removes the corresponding handles from the lookup tree.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Fix vs. original: a missing tree node only triggered WARN_ON but
+ * rb_erase()/kmem_cache_free() were still called on the NULL node,
+ * dereferencing it; the erase is now guarded.
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	struct ipa_flt_entry *next;
+	struct ipa_tree_node *node;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset flt ip=%d\n", ip);
+	list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+		node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, (u32)entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->tbl->rule_cnt--;
+		entry->rt_tbl->ref_cnt--;
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+		/* remove the handle from the database (if it was found) */
+		if (node) {
+			rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			node = ipa_search(&ipa_ctx->flt_rule_hdl_tree,
+					(u32)entry);
+			if (node == NULL)
+				WARN_ON(1);
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			entry->rt_tbl->ref_cnt--;
+			entry->cookie = 0;
+			kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			if (node) {
+				rb_erase(&node->node,
+						&ipa_ctx->flt_rule_hdl_tree);
+				kmem_cache_free(ipa_ctx->tree_node_cache,
+						node);
+			}
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
new file mode 100644
index 0000000..4b9a500
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 32, 64 };
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem: [out] buffer to put the header table
+ *
+ * Allocates a DMA-coherent buffer sized to the current end of the header
+ * table and copies every header at its assigned offset; caller owns (and
+ * must free) mem->base on success.
+ *
+ * Returns: 0 on success, -EPERM if the table is empty, -ENOMEM on
+ * allocation failure
+ */
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa_hdr_entry *entry;
+
+	mem->size = ipa_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	/* lay each header out at the offset chosen when it was added */
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+				entry->hdr_len);
+	}
+
+	return 0;
+}
+
+/*
+ * __ipa_commit_hdr() commits hdr to hardware
+ * This function needs to be called with a locked mutex.
+ *
+ * Sends IPA_HDR_INIT_LOCAL or IPA_HDR_INIT_SYSTEM depending on whether
+ * the header table lives in local IPA RAM or in system memory. For the
+ * system case the freshly generated buffer replaces (and frees) the one
+ * previously handed to HW.
+ *
+ * Returns 0 on success, -EPERM on failure.
+ *
+ * Fix vs. original: on the "tbl too big" path the DMA buffer allocated by
+ * ipa_generate_hdr_hw_tbl() used to leak (goto fail_hw_tbl_gen skipped the
+ * dma_free_coherent); that path now jumps to fail_send_cmd which frees it.
+ */
+static int __ipa_commit_hdr(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	struct ipa_hdr_init_local *cmd;
+	u16 len;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	/* the immediate command param size is same for both local and system */
+	len = sizeof(struct ipa_hdr_init_local);
+
+	/*
+	 * we can use init_local ptr for init_system due to layout of the
+	 * struct
+	 */
+	cmd = kmalloc(len, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_hdr_hw_tbl(mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+				IPA_RAM_HDR_SIZE);
+		/* mem->base was allocated above; free it on this path too */
+		goto fail_send_cmd;
+	}
+
+	cmd->hdr_table_addr = mem->phys_base;
+	if (ipa_ctx->hdr_tbl_lcl) {
+		cmd->size_hdr_table = mem->size;
+		cmd->hdr_addr = IPA_RAM_HDR_OFST;
+		desc.opcode = IPA_HDR_INIT_LOCAL;
+	} else {
+		desc.opcode = IPA_HDR_INIT_SYSTEM;
+	}
+	desc.pyld = cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		/* HW copied the table into local RAM; buffer no longer needed */
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	} else {
+		/* HW references the buffer in place; swap out the old one */
+		if (ipa_ctx->hdr_mem.phys_base) {
+			dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+					ipa_ctx->hdr_mem.base,
+					ipa_ctx->hdr_mem.phys_base);
+		}
+		ipa_ctx->hdr_mem = *mem;
+	}
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+/*
+ * __ipa_add_hdr() - add one header to the SW header table
+ * @hdr: [inout] header definition; hdr->hdr_hdl receives the new handle
+ *
+ * The header is placed into the smallest size bin (8/16/32/64 bytes, see
+ * ipa_hdr_bin_sz[]) that fits it. A free offset from that bin's free list
+ * is reused when available; otherwise the table grows by one bin slot.
+ *
+ * Returns 0 on success, -EPERM on failure.
+ * NOTE(review): insert failure into the handle tree only WARNs; the
+ * header remains added and 0 is returned.
+ */
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	struct ipa_tree_node *node;
+	u32 bin;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (hdr->hdr_len == 0) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto hdr_alloc_fail;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->cookie = IPA_COOKIE;
+
+	/* choose the smallest bin that fits this header */
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+				GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc hdr offset object\n");
+			goto ofst_alloc_fail;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				     struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
+			htbl->hdr_cnt, offset->offset);
+
+	/* handle is the entry's kernel address cast to u32 */
+	hdr->hdr_hdl = (u32) entry;
+	node->hdl = hdr->hdr_hdl;
+	if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+ofst_alloc_fail:
+	kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+hdr_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+	return -EPERM;
+}
+
+/*
+ * __ipa_del_hdr() - delete one header identified by its handle
+ * @hdr_hdl: handle previously returned by __ipa_add_hdr()
+ *
+ * Refuses to delete a header that is still referenced (ref_cnt != 0).
+ * The header's offset slot is returned to its bin's free list for reuse.
+ *
+ * Returns 0 on success, -EINVAL for a bad/in-use handle, -EPERM if the
+ * handle is not in the lookup tree.
+ */
+static int __ipa_del_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (!entry || (entry->cookie != IPA_COOKIE) || (entry->ref_cnt != 0)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+			htbl->hdr_cnt, entry->offset_entry->offset);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+			&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs: [inout] set of headers to add; per-header status and handle are
+ * written back into each entry
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	/* per-header failures are reported via status, not by aborting */
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
+ * to IPA HW
+ * @hdls: [inout] set of headers to delete; per-handle status is written
+ * back into each entry
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	/* per-handle failures are reported via status, not by aborting */
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_dump_hdr() - prints all the headers in the header table in SW
+ *
+ * Debug helper: logs length, offset and bin of every header under
+ * ipa_ctx->lock.
+ *
+ * Note: Should not be called from atomic context
+ */
+void ipa_dump_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+
+	IPADBG("START\n");
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
+				entry->offset_entry->offset,
+				entry->offset_entry->bin);
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (__ipa_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Frees every header except the default exception header (and its offset
+ * slot at 0), drops all offset/free-offset entries, and restores the
+ * table bookkeeping to the single default header.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Fixes vs. original: the v6 reset-failure message wrongly said "v4";
+ * a missing tree node only WARNed but rb_erase()/free were still called
+ * on the NULL node - the erase is now guarded.
+ */
+int ipa_reset_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_entry *next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa_tree_node *node;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default exception header */
+		if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
+					IPA_RESOURCE_NAME_MAX))
+			continue;
+
+		node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+		/* remove the handle from the database (if it was found) */
+		if (node) {
+			rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* there is one header of size 8 */
+	ipa_ctx->hdr_tbl.end = 8;
+	ipa_ctx->hdr_tbl.hdr_cnt = 1;
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+/*
+ * __ipa_find_hdr() - look up a header entry by name
+ * @name: header name (bounded by IPA_RESOURCE_NAME_MAX)
+ *
+ * Returns the entry or NULL if no header with that name exists.
+ * NOTE(review): callers in this file hold ipa_ctx->lock around this
+ * list walk - new callers should too.
+ */
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa_hdr_entry *entry;
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return handle if it exists, if
+ * lookup succeeds the header entry ref cnt is increased
+ *
+ * Returns: 0 on success, negative on failure (note: -1, not an errno,
+ * when the header is not found)
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		/* pin the header until the caller releases it */
+		entry->ref_cnt++;
+		lookup->hdl = (uint32_t) entry;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Drops one reference; when the count reaches zero the header is deleted
+ * and the header table is committed to HW.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * NOTE(review): the cookie/ref_cnt checks and the tree lookup happen
+ * before ipa_ctx->lock is taken, so they can race with a concurrent
+ * delete - confirm whether callers serialize externally.
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	int result = -EFAULT;
+
+	if (entry == NULL || entry->cookie != IPA_COOKIE ||
+	    entry->ref_cnt == 0) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0) {
+		if (__ipa_del_hdr(hdr_hdl)) {
+			IPAERR("fail to del hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		/* commit for put */
+		if (__ipa_commit_hdr()) {
+			IPAERR("fail to commit hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists, this would be called for partial headers
+ *
+ * Returns: 0 on success, negative on failure (-EFAULT when not found)
+ *
+ * Note: Should not be called from atomic context
+ * NOTE(review): copy->hdr is assumed large enough for entry->hdr_len
+ * bytes (bounded by the largest header bin) - confirm the ioc struct.
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->is_partial = entry->is_partial;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
+
diff --git a/drivers/platform/msm/ipa/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_hw_defs.h
new file mode 100644
index 0000000..3131a84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hw_defs.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT        (1)
+#define IPA_PPP_FRM_INIT       (2)
+#define IPA_IP_V4_FILTER_INIT  (3)
+#define IPA_IP_V6_FILTER_INIT  (4)
+#define IPA_IP_V4_NAT_INIT     (5)
+#define IPA_IP_V6_NAT_INIT     (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL     (9)
+#define IPA_HDR_INIT_SYSTEM    (10)
+#define IPA_DECIPH_SETUP       (11)
+#define IPA_INSERT_NAT_RULE    (12)
+#define IPA_DELETE_NAT_RULE    (13)
+#define IPA_NAT_DMA            (14)
+#define IPA_IP_PACKET_TAG      (15)
+#define IPA_IP_PACKET_INIT     (16)
+
+/*
+ * logical interface IDs; NOTE(review): presumably the values carried in
+ * struct ipa_a5_mux_hdr.interface_id - confirm against the data path code
+ */
+#define IPA_INTERFACE_ID_EXCEPTION         (0)
+#define IPA_INTERFACE_ID_A2_WWAN           (0x10)
+#define IPA_INTERFACE_ID_HSUSB_RMNET1      (0x21)
+#define IPA_INTERFACE_ID_HSUSB_RMNET2      (0x22)
+#define IPA_INTERFACE_ID_HSUSB_RMNET3      (0x23)
+#define IPA_INTERFACE_ID_HSIC_WLAN_WAN     (0x31)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN1    (0x32)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN2    (0x33)
+#define IPA_INTERFACE_ID_HSIC_RMNET1       (0x41)
+#define IPA_INTERFACE_ID_HSIC_RMNET2       (0x42)
+#define IPA_INTERFACE_ID_HSIC_RMNET3       (0x43)
+#define IPA_INTERFACE_ID_HSIC_RMNET4       (0x44)
+#define IPA_INTERFACE_ID_HSIC_RMNET5       (0x45)
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @rsvd: reserved
+ *
+ * Bit-fields total exactly 32 bits, overlaying @word via the union.
+ */
+struct ipa_flt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 action:5;
+			u32 rt_tbl_idx:5;
+			u32 rsvd:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ *
+ * Bit-fields total exactly 32 bits, overlaying @word via the union.
+ */
+struct ipa_rt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 pipe_dest_idx:5;
+			u32 system:1;
+			u32 hdr_offset:10;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ *
+ * 64-bit bit-field layout matching the HW immediate command format.
+ */
+struct ipa_ip_v4_filter_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_addr: address of header table
+ * @size_hdr_table: size of the above
+ * @hdr_addr: header address
+ * @rsvd: reserved
+ *
+ * Leading field matches struct ipa_hdr_init_system, which is why
+ * ipa_hdr.c reuses this type for the system variant.
+ */
+struct ipa_hdr_init_local {
+	u64 hdr_table_addr:32;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: address of header table
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+	u64 hdr_table_addr:32;
+	u64 rsvd:32;
+};
+
+/* exception-reason flag bits carried in struct ipa_a5_mux_hdr.flags */
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP         BIT(0)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT        BIT(1)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT     BIT(2)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG        BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL        BIT(5)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags (IPA_A5_MUX_HDR_EXCP_FLAG_*)
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsvd1:3;
+	u64 metadata:32;
+	u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: index in table
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+	u64 ipv4_rules_addr:32;
+	u64 ipv4_expansion_rules_addr:32;
+	u64 index_table_addr:32;
+	u64 index_table_expansion_addr:32;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
new file mode 100644
index 0000000..63ef5fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+
+#define DRV_NAME "ipa"
+#define IPA_COOKIE 0xfacefeed
+
+#define IPA_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ (0x800)
+
+#ifdef IPA_DEBUG
+#define IPADBG(fmt, args...) \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#else
+#define IPADBG(fmt, args...)
+#endif
+
+#define IPAERR(fmt, args...) \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPA_TOS_EQ BIT(0)
+#define IPA_PROTOCOL_EQ BIT(1)
+#define IPA_OFFSET_MEQ32_0 BIT(2)
+#define IPA_OFFSET_MEQ32_1 BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0 BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1 BIT(5)
+#define IPA_IHL_OFFSET_EQ_16 BIT(6)
+#define IPA_IHL_OFFSET_EQ_32 BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0 BIT(8)
+#define IPA_OFFSET_MEQ128_0 BIT(9)
+#define IPA_OFFSET_MEQ128_1 BIT(10)
+#define IPA_TC_EQ BIT(11)
+#define IPA_FL_EQ BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1 BIT(13)
+#define IPA_METADATA_COMPARE BIT(14)
+#define IPA_IPV4_IS_FRAG BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN_MAX 4
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_SKB_SIZE 2048
+
+#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
+
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE (128)
+
+/**
+ * enum ipa_sys_pipe - 5 A5-IPA pipes
+ *
+ * 5 A5-IPA pipes (all system mode)
+ */
+enum ipa_sys_pipe {
+ IPA_A5_UNUSED,
+ IPA_A5_CMD,
+ IPA_A5_LAN_WAN_OUT,
+ IPA_A5_LAN_WAN_IN,
+ IPA_A5_WLAN_AMPDU_OUT,
+ IPA_A5_SYS_MAX
+};
+
+/**
+ * enum ipa_operating_mode - IPA operating mode
+ *
+ * IPA operating mode
+ */
+enum ipa_operating_mode {
+ IPA_MODE_USB_DONGLE,
+ IPA_MODE_MSM,
+ IPA_MODE_EXT_APPS,
+ IPA_MODE_MOBILE_AP_WAN,
+ IPA_MODE_MOBILE_AP_WLAN,
+ IPA_MODE_MOBILE_AP_ETH,
+ IPA_MODE_MAX
+};
+
+/**
+ * enum ipa_bridge_dir - direction of the bridge from air interface perspective
+ *
+ * IPA bridge direction
+ */
+enum ipa_bridge_dir {
+ IPA_DL,
+ IPA_UL,
+ IPA_DIR_MAX
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+ void *base;
+ dma_addr_t phys_base;
+ u32 size;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+ struct list_head link;
+ struct ipa_flt_rule rule;
+ u32 cookie;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_rt_tbl *rt_tbl;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ */
+struct ipa_rt_tbl {
+ struct list_head link;
+ struct list_head head_rt_rule_list;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u32 idx;
+ u32 rule_cnt;
+ u32 ref_cnt;
+ struct ipa_rt_tbl_set *set;
+ u32 cookie;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @is_partial: flag indicating if header table entry is partial
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of header table entry
+ */
+struct ipa_hdr_entry {
+ struct list_head link;
+ u8 hdr[IPA_HDR_MAX_SIZE];
+ u32 hdr_len;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u8 is_partial;
+ struct ipa_hdr_offset_entry *offset_entry;
+ u32 cookie;
+ u32 ref_cnt;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa_hdr_tbl {
+ struct list_head head_hdr_entry_list;
+ struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+ u32 hdr_cnt;
+ u32 end;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @end: the last header index
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+ struct list_head head_flt_rule_list;
+ u32 rule_cnt;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @hw_len: the length of the table
+ */
+struct ipa_rt_entry {
+ struct list_head link;
+ struct ipa_rt_rule rule;
+ u32 cookie;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_hdr_entry *hdr;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+ struct list_head head_rt_tbl_list;
+ u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_tree_node - handle database entry
+ * @node: RB node
+ * @hdl: handle
+ */
+struct ipa_tree_node {
+ struct rb_node node;
+ u32 hdl;
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information
+ * @notify: user provided CB for EP events notification
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ */
+struct ipa_ep_context {
+ int valid;
+ enum ipa_client_type client;
+ struct sps_pipe *ep_hdl;
+ struct ipa_ep_cfg cfg;
+ u32 dst_pipe_index;
+ u32 rt_tbl_idx;
+ struct sps_connect connect;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ bool desc_fifo_in_pipe_mem;
+ bool data_fifo_in_pipe_mem;
+ u32 desc_fifo_pipe_mem_ofst;
+ u32 data_fifo_pipe_mem_ofst;
+ bool desc_fifo_client_allocated;
+ bool data_fifo_client_allocated;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ * @wait_desc_list: used to hold completed Tx packets
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+ struct list_head head_desc_list;
+ u32 len;
+ spinlock_t spinlock;
+ struct sps_register_event event;
+ struct ipa_ep_context *ep;
+ struct list_head wait_desc_list;
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptors type
+ *
+ * IPA descriptors type, IPA supports DD and ICD but no CD
+ */
+enum ipa_desc_type {
+ IPA_DATA_DESC,
+ IPA_DATA_DESC_SKB,
+ IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: info for the skb or immediate command param
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ */
+struct ipa_tx_pkt_wrapper {
+ enum ipa_desc_type type;
+ struct ipa_mem_buffer mem;
+ struct work_struct work;
+ struct list_head link;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct ipa_sys_context *sys;
+ struct ipa_mem_buffer mult;
+ u16 cnt;
+ void *bounce;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+ enum ipa_desc_type type;
+ void *pyld;
+ u16 len;
+ u16 opcode;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @work: work struct for current Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+ struct sk_buff *skb;
+ dma_addr_t dma_address;
+ struct work_struct work;
+ struct list_head link;
+ u16 len;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ */
+struct ipa_nat_mem {
+ struct class *class;
+ struct device *dev;
+ struct cdev cdev;
+ dev_t dev_num;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+ bool is_mapped;
+ bool is_sys_mem;
+ bool is_dev_init;
+ struct mutex lock;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @tree_node_cache: tree nodes cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @sys: IPA sys context for system-bam pipes
+ * @rx_wq: Rx packets work queue
+ * @tx_wq: Tx packets work queue
+ * @smem_sz: shared memory size
+ * @hdr_hdl_tree: header handles tree
+ * @rt_rule_hdl_tree: routing rule handles tree
+ * @rt_tbl_hdl_tree: routing table handles tree
+ * @flt_rule_hdl_tree: filtering rule handles tree
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @polling_mode: 1 - pure polling mode; 0 - interrupt+polling mode
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @curr_polling_state: current polling state
+ * @poll_work: polling work
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_mem: header memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @pipe_mem_pool: pipe memory pool
+ * @one_kb_no_straddle_pool: one kb no straddle pool
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ u32 bam_handle;
+ struct ipa_ep_context ep[IPA_NUM_PIPES];
+ struct ipa_flt_tbl flt_tbl[IPA_NUM_PIPES][IPA_IP_MAX];
+ enum ipa_operating_mode mode;
+ void __iomem *mmio;
+ u32 ipa_wrapper_base;
+ struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+ struct ipa_hdr_tbl hdr_tbl;
+ struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+ struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+ struct kmem_cache *flt_rule_cache;
+ struct kmem_cache *rt_rule_cache;
+ struct kmem_cache *hdr_cache;
+ struct kmem_cache *hdr_offset_cache;
+ struct kmem_cache *rt_tbl_cache;
+ struct kmem_cache *tx_pkt_wrapper_cache;
+ struct kmem_cache *rx_pkt_wrapper_cache;
+ struct kmem_cache *tree_node_cache;
+ unsigned long rt_idx_bitmap[IPA_IP_MAX];
+ struct mutex lock;
+ struct ipa_sys_context sys[IPA_A5_SYS_MAX];
+ struct workqueue_struct *rx_wq;
+ struct workqueue_struct *tx_wq;
+ u16 smem_sz;
+ struct rb_root hdr_hdl_tree;
+ struct rb_root rt_rule_hdl_tree;
+ struct rb_root rt_tbl_hdl_tree;
+ struct rb_root flt_rule_hdl_tree;
+ struct ipa_nat_mem nat_mem;
+ u32 excp_hdr_hdl;
+ u32 dflt_v4_rt_rule_hdl;
+ u32 dflt_v6_rt_rule_hdl;
+ bool polling_mode;
+ uint aggregation_type;
+ uint aggregation_byte_limit;
+ uint aggregation_time_limit;
+ uint curr_polling_state;
+ struct delayed_work poll_work;
+ bool hdr_tbl_lcl;
+ struct ipa_mem_buffer hdr_mem;
+ bool ip4_rt_tbl_lcl;
+ bool ip6_rt_tbl_lcl;
+ bool ip4_flt_tbl_lcl;
+ bool ip6_flt_tbl_lcl;
+ struct ipa_mem_buffer empty_rt_tbl_mem;
+ struct gen_pool *pipe_mem_pool;
+ struct dma_pool *one_kb_no_straddle_pool;
+ atomic_t ipa_active_clients;
+ u32 clnt_hdl_cmd;
+ u32 clnt_hdl_data_in;
+ u32 clnt_hdl_data_out;
+ u8 a5_pipe_index;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ */
+struct ipa_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+ IPA_SPS_PIPE_MEM = 0,
+ IPA_PRIVATE_MEM = 1,
+ IPA_SYSTEM_MEM = 2,
+};
+
+/**
+ * enum a2_mux_pipe_direction - IPA-A2 pipe direction
+ */
+enum a2_mux_pipe_direction {
+ A2_TO_IPA = 0,
+ IPA_TO_A2 = 1
+};
+
+/**
+ * struct a2_mux_pipe_connection - A2 MUX pipe connection
+ * @src_phy_addr: source physical address
+ * @src_pipe_index: source pipe index
+ * @dst_phy_addr: destination physical address
+ * @dst_pipe_index: destination pipe index
+ * @mem_type: pipe memory type
+ * @data_fifo_base_offset: data FIFO base offset
+ * @data_fifo_size: data FIFO size
+ * @desc_fifo_base_offset: descriptors FIFO base offset
+ * @desc_fifo_size: descriptors FIFO size
+ */
+struct a2_mux_pipe_connection {
+ int src_phy_addr;
+ int src_pipe_index;
+ int dst_phy_addr;
+ int dst_pipe_index;
+ enum ipa_pipe_mem_type mem_type;
+ int data_fifo_base_offset;
+ int data_fifo_size;
+ int desc_fifo_base_offset;
+ int desc_fifo_size;
+};
+
+extern struct ipa_context *ipa_ctx;
+
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pipe_connect);
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle);
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc);
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc);
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client);
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ u8 **buf,
+ u16 *en_rule);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+void ipa_dump(void);
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem);
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+/*
+ * below functions read from/write to IPA local memory a.k.a. device memory.
+ * the order of the arguments is deliberately different from the ipa_write*
+ * functions which operate on system memory
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram);
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram);
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram);
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count);
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count);
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count);
+
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data);
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl);
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+ ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+void ipa_replenish_rx_cache(void);
+void ipa_cleanup_rx(void);
+int ipa_cfg_filter(u32 disable);
+void ipa_write_done(struct work_struct *work);
+void ipa_handle_rx(struct work_struct *work);
+void ipa_handle_rx_core(void);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+ u32 val = ioread32(base + offset);
+ IPADBG("0x%x(va) read reg 0x%x r_val 0x%x.\n",
+ (u32)base, offset, val);
+ return val;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+ IPADBG("0x%x(va) write reg 0x%x w_val 0x%x.\n",
+ (u32)base, offset, val);
+}
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+int ipa_bridge_setup(enum ipa_bridge_dir dir);
+int ipa_bridge_teardown(enum ipa_bridge_dir dir);
+
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_nat.c b/drivers/platform/msm/ipa/ipa_nat.c
new file mode 100644
index 0000000..c13c53a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_nat.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY 0
+#define IPA_NAT_SHARED_MEMORY 1
+
+static int ipa_nat_vma_fault_remap(
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ IPADBG("\n");
+ vmf->page = NULL;
+
+ return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+ .fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_nat_mem *nat_ctx;
+ IPADBG("\n");
+ nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+ filp->private_data = nat_ctx;
+ IPADBG("return\n");
+ return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+ unsigned long phys_addr;
+ int result;
+
+ mutex_lock(&nat_ctx->lock);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ if (nat_ctx->is_mapped) {
+ IPAERR("mapping already exists, only 1 supported\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ IPADBG("map sz=0x%x\n", nat_ctx->size);
+ result =
+ dma_mmap_coherent(
+ NULL, vma,
+ nat_ctx->vaddr, nat_ctx->dma_handle,
+ nat_ctx->size);
+
+ if (result) {
+ IPAERR("unable to map memory. Err:%d\n", result);
+ goto bail;
+ }
+ } else {
+ IPADBG("Mapping shared(local) memory\n");
+ IPADBG("map sz=0x%lx\n", vsize);
+ phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+ IPA_SRAM_DIRECT_ACCESS_n_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+ if (remap_pfn_range(
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ IPAERR("remap failed\n");
+ result = -EAGAIN;
+ goto bail;
+ }
+
+ }
+ nat_ctx->is_mapped = true;
+ vma->vm_ops = &ipa_nat_remap_vm_ops;
+ IPADBG("return\n");
+ result = 0;
+bail:
+ mutex_unlock(&nat_ctx->lock);
+ return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_nat_open,
+ .mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+ int gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result;
+
+ IPADBG("passed memory size %d\n", mem->size);
+
+ mutex_lock(&nat_ctx->lock);
+ if (mem->size <= 0 || !strlen(mem->dev_name)
+ || nat_ctx->is_dev_init == true) {
+ IPADBG("Invalid Parameters or device is already init\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ nat_ctx->is_sys_mem = true;
+ nat_ctx->vaddr =
+ dma_alloc_coherent(NULL, mem->size, &nat_ctx->dma_handle,
+ gfp_flags);
+ if (nat_ctx->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ nat_ctx->size = mem->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ nat_ctx->is_sys_mem = false;
+ }
+
+ nat_ctx->class = class_create(THIS_MODULE, mem->dev_name);
+ if (IS_ERR(nat_ctx->class)) {
+ IPAERR("unable to create the class\n");
+ result = -ENODEV;
+ goto vaddr_alloc_fail;
+ }
+ result = alloc_chrdev_region(&nat_ctx->dev_num,
+ 0,
+ 1,
+ mem->dev_name);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto alloc_chrdev_region_fail;
+ }
+
+ nat_ctx->dev =
+ device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+ mem->dev_name);
+
+ if (IS_ERR(nat_ctx->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ result = -ENODEV;
+ goto device_create_fail;
+ }
+
+ cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+ nat_ctx->cdev.owner = THIS_MODULE;
+ nat_ctx->cdev.ops = &ipa_nat_fops;
+
+ result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ if (result) {
+ IPAERR("cdev_add err=%d\n", -result);
+ goto cdev_add_fail;
+ }
+ nat_ctx->is_dev_init = true;
+ IPADBG("IPA NAT driver init successfully\n");
+ result = 0;
+ goto bail;
+
+cdev_add_fail:
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+ class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+ if (nat_ctx->vaddr) {
+ IPADBG("Releasing system memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->vaddr = NULL;
+ nat_ctx->dma_handle = 0;
+ nat_ctx->size = 0;
+ }
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
+
+/* IOCTL function handlers */
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ int result;
+
+ IPADBG("\n");
+ if (init->tbl_index < 0 || init->table_entries <= 0) {
+ IPADBG("Table index or entries is zero\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ if (ipa_ctx->nat_mem.vaddr) {
+ IPADBG("using system memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+ cmd->ipv4_expansion_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+ IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+ cmd->index_table_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd->index_table_expansion_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ init->ipv4_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->ipv4_expansion_rules_addr =
+ init->expn_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_addr = init->index_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_expansion_addr =
+ init->index_expn_offset + IPA_RAM_NAT_OFST;
+ }
+ cmd->table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd->table_index);
+ cmd->size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+ cmd->size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+ cmd->public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ IPADBG("posting v4 init command\n");
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_cmd;
+ }
+
+ IPADBG("return\n");
+ result = 0;
+free_cmd:
+ kfree(cmd);
+bail:
+ return result;
+}
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ struct ipa_nat_dma *cmd = NULL;
+ struct ipa_desc *desc = NULL;
+ u16 size = 0, cnt = 0;
+ int ret = 0;
+
+ IPADBG("\n");
+ if (dma->entries <= 0) {
+ IPADBG("Invalid number of commands\n");
+ ret = -EPERM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_desc) * dma->entries;
+ desc = kmalloc(size, GFP_KERNEL);
+ if (desc == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_nat_dma) * dma->entries;
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ cmd[cnt].table_index = dma->dma[cnt].table_index;
+ cmd[cnt].base_addr = dma->dma[cnt].base_addr;
+ cmd[cnt].offset = dma->dma[cnt].offset;
+ cmd[cnt].data = dma->dma[cnt].data;
+ desc[cnt].type = IPA_IMM_CMD_DESC;
+ desc[cnt].opcode = IPA_NAT_DMA;
+ desc[cnt].callback = NULL;
+ desc[cnt].user1 = NULL;
+
+ desc[cnt].user2 = NULL;
+
+ desc[cnt].len = sizeof(struct ipa_nat_dma);
+ desc[cnt].pyld = (void *)&cmd[cnt];
+ }
+ IPADBG("posting dma command with entries %d\n", dma->entries);
+ ret = ipa_send_cmd(dma->entries, desc);
+ if (ret == -EPERM)
+ IPAERR("Fail to send immediate command\n");
+
+bail:
+ kfree(cmd);
+ kfree(desc);
+
+ return ret;
+}
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx: [in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+ IPADBG("\n");
+ mutex_lock(&nat_ctx->lock);
+
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("freeing the dma memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->size = 0;
+ nat_ctx->vaddr = NULL;
+ }
+ nat_ctx->is_mapped = false;
+ nat_ctx->is_sys_mem = false;
+ cdev_del(&nat_ctx->cdev);
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+ class_destroy(nat_ctx->class);
+ nat_ctx->is_dev_init = false;
+
+ mutex_unlock(&nat_ctx->lock);
+ IPADBG("return\n");
+ return;
+}
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ u8 mem_type = IPA_NAT_SHARED_MEMORY;
+ u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+ int result;
+
+ IPADBG("\n");
+ if (del->table_index < 0 || del->public_ip_addr == 0) {
+ IPADBG("Bad Parameter\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ cmd->table_index = del->table_index;
+ cmd->ipv4_rules_addr = base_addr;
+ cmd->ipv4_rules_addr_type = mem_type;
+ cmd->ipv4_expansion_rules_addr = base_addr;
+ cmd->ipv4_expansion_rules_addr_type = mem_type;
+ cmd->index_table_addr = base_addr;
+ cmd->index_table_addr_type = mem_type;
+ cmd->index_table_expansion_addr = base_addr;
+ cmd->index_table_expansion_addr_type = mem_type;
+ cmd->size_base_tables = 0;
+ cmd->size_expansion_tables = 0;
+ cmd->public_ip_addr = del->public_ip_addr;
+
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_mem;
+ }
+
+ ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+ IPADBG("return\n");
+ result = 0;
+free_mem:
+ kfree(cmd);
+bail:
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
new file mode 100644
index 0000000..000718b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM. Not all 8K is
+ * available for SW use: the first 2K are set aside for NAT. The
+ * remaining regions (hdr, v4/v6 filter, v4/v6 route) are laid out
+ * back-to-back via the *_OFST arithmetic below.
+ */
+
+#define IPA_RAM_NAT_OFST 0
+#define IPA_RAM_NAT_SIZE 2048
+#define IPA_RAM_HDR_OFST 2048
+#define IPA_RAM_HDR_SIZE 256
+#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
+#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_RT_OFST (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
+#define IPA_RAM_V4_RT_SIZE 1024
+#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
+#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_RT_OFST (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
+#define IPA_RAM_V6_RT_SIZE 1024
+#define IPA_RAM_END_OFST (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
new file mode 100644
index 0000000..61913b6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+/*
+ * IPA's core specific registers
+ */
+
+#define IPA_REG_BASE_OFST 0x00020000
+
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+#define IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define IPA_COMP_HW_VERSION_MAJOR_BMSK 0xff000000
+#define IPA_COMP_HW_VERSION_MAJOR_SHFT 0x18
+#define IPA_COMP_HW_VERSION_MINOR_BMSK 0xff0000
+#define IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define IPA_COMP_HW_VERSION_STEP_SHFT 0x0
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_VERSION_RMSK 0xffffffff
+#define IPA_VERSION_IPA_R_REV_BMSK 0xff000000
+#define IPA_VERSION_IPA_R_REV_SHFT 0x18
+#define IPA_VERSION_IPA_Q_REV_BMSK 0xff0000
+#define IPA_VERSION_IPA_Q_REV_SHFT 0x10
+#define IPA_VERSION_IPA_P_REV_BMSK 0xff00
+#define IPA_VERSION_IPA_P_REV_SHFT 0x8
+#define IPA_VERSION_IPA_ECO_REV_BMSK 0xff
+#define IPA_VERSION_IPA_ECO_REV_SHFT 0x0
+
+#define IPA_COMP_CFG_OFST 0x00000038
+#define IPA_COMP_CFG_RMSK 0x1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0x0
+
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+#define IPA_COMP_SW_RESET_RMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_BMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_SHFT 0x0
+
+#define IPA_CLKON_CFG_OFST 0x00000040
+#define IPA_CLKON_CFG_RMSK 0xf
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_TX_BMSK 0x4
+#define IPA_CLKON_CFG_CGC_OPEN_TX_SHFT 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_SHFT 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+#define IPA_HEAD_OF_LINE_BLOCK_EN_RMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_BMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_OFST 0x00000048
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_RMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_BMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_SHFT 0x0
+
+#define IPA_ROUTE_OFST 0x0000004c
+#define IPA_ROUTE_RMSK 0x1ffff
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+
+#define IPA_FILTER_OFST 0x00000050
+#define IPA_FILTER_RMSK 0x1
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_MASTER_PRIORITY_OFST 0x00000054
+#define IPA_MASTER_PRIORITY_RMSK 0xffffffff
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_BMSK 0xc0000000
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_SHFT 0x1e
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_BMSK 0x30000000
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_SHFT 0x1c
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_BMSK 0xc000000
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_SHFT 0x1a
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_BMSK 0x3000000
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_SHFT 0x18
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_BMSK 0xc00000
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_SHFT 0x16
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_BMSK 0x300000
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_SHFT 0x14
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_BMSK 0xc0000
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_SHFT 0x12
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_BMSK 0x30000
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_SHFT 0x10
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_BMSK 0xc000
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_SHFT 0xe
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_BMSK 0x3000
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_SHFT 0xc
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_BMSK 0xc00
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_SHFT 0xa
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_BMSK 0x300
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_SHFT 0x8
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_BMSK 0xc0
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_SHFT 0x6
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_BMSK 0x30
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_SHFT 0x4
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_BMSK 0xc
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_SHFT 0x2
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_BMSK 0x3
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_SHFT 0x0
+
+#define IPA_SHARED_MEM_SIZE_OFST 0x00000058
+#define IPA_SHARED_MEM_SIZE_RMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+#define IPA_NAT_TIMER_OFST 0x0000005c
+#define IPA_NAT_TIMER_RMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_BMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_SHFT 0x0
+
+#define IPA_NAT_TIMER_RESET_OFST 0x00000060
+#define IPA_NAT_TIMER_RESET_RMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_BMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_SHFT 0x0
+
+#define IPA_ENDP_INIT_NAT_n_OFST(n) (0x00000080 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_MAXn 19
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_n_OFST(n) (0x000000e0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_n_RMSK 0x7ffffff
+#define IPA_ENDP_INIT_HDR_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_OFST(n) (0x00000140 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_n_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_n_MAXn 19
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x7c
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x2
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_OFST(n) (0x000001a0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_n_RMSK 0x7fff
+#define IPA_ENDP_INIT_AGGR_n_MAXn 19
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_ROUTE_n_OFST(n) (0x00000200 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_n_RMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_MAXn 19
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_1_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_SPARE_REG_2_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_SRAM_DIRECT_ACCESS_n_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_n_RMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_MAXn 2047
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_BMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_SHFT 0x0
+
+#endif /* __IPA_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
new file mode 100644
index 0000000..c69e1fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_RT_TABLE_WORD_SIZE (4)
+#define IPA_RT_INDEX_BITMAP_SIZE (32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
+#define IPA_RT_BIT_MASK (0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
+
+/**
+ * ipa_generate_rt_hw_rule() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer, buf == NULL means
+ * caller wants to know the size of the rule as seen
+ * by HW so they did not pass a valid buffer, we will use a
+ * scratch buffer instead.
+ * With this scheme we are going to
+ * generate the rule twice, once to know size using scratch
+ * buffer and second to write the rule to the actual caller
+ * supplied buffer which is of required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ struct ipa_rt_rule_hw_hdr *rule_hdr;
+ const struct ipa_rt_rule *rule =
+ (const struct ipa_rt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+ u8 *start;
+ int pipe_idx;
+
+ /* size-probe mode: render into the scratch buffer instead */
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ if (buf == NULL)
+ buf = tmp;
+
+ start = buf;
+ rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+ pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst);
+ if (pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+ /* NOTE(review): 'system' mirrors !hdr_tbl_lcl - presumably marks the
+  * header table as residing in system memory; confirm against HW spec
+  */
+ rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+ if (entry->hdr)
+ /* offset is programmed in 4-byte words, hence the >> 2 */
+ rule_hdr->u.hdr.hdr_offset =
+ entry->hdr->offset_entry->offset >> 2;
+ else
+ rule_hdr->u.hdr.hdr_offset = 0;
+
+ buf += sizeof(struct ipa_rt_rule_hw_hdr);
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule 0x%x\n", en_rule);
+
+ rule_hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ /* first pass records the rule's HW size; later passes must match it */
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR(
+ "hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+ entry->hw_len,
+ (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] header (index area) size in bytes
+ * @max_rt_idx: [out] highest routing table index in use
+ *
+ * Returns: total table size in bytes on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+ int *max_rt_idx)
+{
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ u32 total_sz = 0;
+ u32 tbl_sz;
+ u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+ int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+ int i;
+
+ *hdr_sz = 0;
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ /* find the highest allocated table index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (bitmap & IPA_RT_BIT_MASK)
+ highest_bit_set = i;
+ bitmap >>= 1;
+ }
+
+ *max_rt_idx = highest_bit_set;
+ if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+ /* no tables: one empty header word is still required */
+ IPAERR("no rt tbls present\n");
+ total_sz = IPA_RT_TABLE_WORD_SIZE;
+ *hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+ return total_sz;
+ }
+
+ /* header holds one word per index, 0..highest in use */
+ *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ tbl_sz = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ /* NULL buf: probe pass, only computes entry->hw_len */
+ if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW RT rule size\n");
+ return -EPERM;
+ }
+ tbl_sz += entry->hw_len;
+ }
+
+ if (tbl_sz)
+ tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+ /* system-memory tables are not part of the local RAM image */
+ if (tbl->in_sys)
+ continue;
+
+ if (tbl_sz) {
+ /* add the terminator */
+ total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+ /* every rule-set should start at word boundary */
+ total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
+
+/**
+ * ipa_generate_rt_hw_tbl() - generates the routing hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the routing table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ u32 hdr_sz;
+ u32 offset;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ struct ipa_mem_buffer rt_tbl_mem;
+ u8 *rt_tbl_mem_body;
+ int max_rt_idx;
+ int sz;
+ int i;
+
+ /*
+  * capture the signed return first: a negative error assigned straight
+  * into the unsigned mem->size would turn into a huge allocation
+  */
+ sz = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ if (sz < 0) {
+ IPAERR("fail to size RT HW TBL ip %d\n", ip);
+ goto error;
+ }
+ mem->size = sz;
+ mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+ if (mem->size == 0) {
+ IPAERR("rt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* setup all indices to point to the empty sys rt tbl */
+ for (i = 0; i <= max_rt_idx; i++)
+ ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+ hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ offset = body - base;
+ if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n", offset);
+ goto proc_err;
+ }
+
+ if (!tbl->in_sys) {
+ /* convert offset to words from bytes */
+ offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_RT_BIT_MASK;
+
+ /* update the hdr at the right index */
+ ipa_write_32(offset, hdr +
+ (tbl->idx * IPA_RT_TABLE_WORD_SIZE));
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_RT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the RT tbl */
+ rt_tbl_mem.size = tbl->sz;
+ rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, rt_tbl_mem.size,
+ &rt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!rt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ rt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(rt_tbl_mem.phys_base &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+ rt_tbl_mem_body = rt_tbl_mem.base;
+ memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+ /* update the hdr at the right index */
+ ipa_write_32(rt_tbl_mem.phys_base,
+ hdr + (tbl->idx *
+ IPA_RT_TABLE_WORD_SIZE));
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry,
+ rt_tbl_mem_body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ WARN_ON(1);
+ goto rt_table_mem_alloc_failed;
+ }
+ rt_tbl_mem_body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+ /* keep old buffer until HW commit, then reaped */
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = rt_tbl_mem;
+ }
+ }
+
+ return 0;
+
+rt_table_mem_alloc_failed:
+ dma_free_coherent(NULL, rt_tbl_mem.size,
+ rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+ return -EPERM;
+}
+
+/*
+ * __ipa_reap_sys_rt_tbls() - free DMA buffers of routing tables that were
+ * replaced by a commit (prev_mem) and release tables already moved to the
+ * reap list; called from the commit path (under ipa_ctx->lock)
+ */
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *next;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+ }
+
+ set = &ipa_ctx->reap_rt_tbl_set[ip];
+ list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+ list_del(&tbl->link);
+ WARN_ON(tbl->prev_mem.phys_base != 0);
+ /* NOTE(review): the tbl object is freed only when curr_mem was
+  * allocated; a reaped tbl with no DMA buffer appears to leak -
+  * confirm whether that state is reachable
+  */
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+ ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ }
+ }
+}
+
+/*
+ * __ipa_commit_rt() - generate the routing table image for the given IP
+ * family and push it to IPA HW via an immediate command; reaps stale
+ * system tables on success. Caller holds ipa_ctx->lock.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int __ipa_commit_rt(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_routing_init *v4;
+ struct ipa_ip_v6_routing_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = IPA_RAM_V4_RT_SIZE;
+ size = sizeof(struct ipa_ip_v4_routing_init);
+ } else {
+ avail = IPA_RAM_V6_RT_SIZE;
+ size = sizeof(struct ipa_ip_v6_routing_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_rt_hw_tbl(ip, mem)) {
+ IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ /* jump to fail_send_cmd so the DMA buffer just generated is
+  * freed too (fail_hw_tbl_gen would leak it)
+  */
+ goto fail_send_cmd;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_routing_init *)cmd;
+ desc.opcode = IPA_IP_V4_ROUTING_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_routing_init *)cmd;
+ desc.opcode = IPA_IP_V6_ROUTING_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_rt_tbls(ip);
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+ return -EPERM;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ * which name is given as parameter
+ * @ip: [in] the ip address family type of the wanted routing table
+ * @name: [in] the name of the wanted routing table
+ *
+ * Returns: the routing table which name is given as parameter, or NULL if it
+ * doesn't exist
+ *
+ * caller needs to hold ipa_ctx->lock
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ /* linear scan; table names are bounded by IPA_RESOURCE_NAME_MAX */
+ list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+ if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * __ipa_add_rt_tbl() - look up a routing table by name, creating it (and
+ * assigning a free table index) if it does not exist yet; caller holds
+ * ipa_ctx->lock.
+ *
+ * Returns: the table, or NULL on failure
+ */
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+ const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_tree_node *node;
+ int i;
+
+ if (ip >= IPA_IP_MAX || name == NULL) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ /* check if this table exists */
+ entry = __ipa_find_rt_tbl(ip, name);
+ if (!entry) {
+ /* allocate the handle-tree node only on the create path;
+  * allocating it up front leaked it whenever the table
+  * already existed
+  */
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT tbl object\n");
+ goto fail_rt_tbl_alloc;
+ }
+ /* find a routing tbl index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+ entry->idx = i;
+ set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+ break;
+ }
+ }
+ if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+ IPAERR("not free RT tbl indices left\n");
+ goto fail_rt_idx_alloc;
+ }
+
+ INIT_LIST_HEAD(&entry->head_rt_rule_list);
+ INIT_LIST_HEAD(&entry->link);
+ strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+ entry->set = set;
+ entry->cookie = IPA_COOKIE;
+ entry->in_sys = (ip == IPA_IP_v4) ?
+ !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+ set->tbl_cnt++;
+ list_add(&entry->link, &set->head_rt_tbl_list);
+
+ IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+ set->tbl_cnt, ip);
+
+ /* the object's address doubles as its public handle */
+ node->hdl = (u32)entry;
+ if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ }
+
+ return entry;
+
+fail_rt_idx_alloc:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+fail_rt_tbl_alloc:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return NULL;
+}
+
+/*
+ * __ipa_del_rt_tbl() - remove a routing table: local tables are freed
+ * immediately, system-memory tables are moved to the reap list so their
+ * DMA buffer is released only after the next HW commit; caller holds
+ * ipa_ctx->lock.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ /* cookie guards against stale/forged handles */
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ /* derive IP family from which set the table belongs to */
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ if (!entry->in_sys) {
+ list_del(&entry->link);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+ } else {
+ /* defer freeing the DMA buffer until after the next commit */
+ list_move(&entry->link,
+ &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ }
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/*
+ * __ipa_add_rt_rule() - add a single routing rule to the named table
+ * (creating the table if needed) and register its handle; caller holds
+ * ipa_ctx->lock.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_tree_node *node;
+
+ /* hdr_hdl, when set, must be a live header handle (handles are the
+  * object addresses, validated via the hdr handle tree + cookie)
+  */
+ if (rule->hdr_hdl &&
+ ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
+ ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ tbl = __ipa_add_rt_tbl(ip, name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ goto fail_rt_tbl_sanity;
+ }
+ /*
+  * do not allow any rules to be added at end of the "default" routing
+  * tables
+  */
+ if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
+ (tbl->rule_cnt > 0) && (at_rear != 0)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+ tbl->rule_cnt, at_rear);
+ goto fail_rt_tbl_sanity;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT rule object\n");
+ goto fail_rt_tbl_sanity;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->cookie = IPA_COOKIE;
+ entry->rule = *rule;
+ entry->tbl = tbl;
+ entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
+ if (at_rear)
+ list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_rt_rule_list);
+ tbl->rule_cnt++;
+ /* pin the referenced header while this rule exists */
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+ *rule_hdl = (u32)entry;
+
+ node->hdl = *rule_hdl;
+ if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ goto ipa_insert_failed;
+ }
+
+ return 0;
+
+ipa_insert_failed:
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+fail_rt_tbl_sanity:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ /* per-rule failures are reported via each rule's status field;
+  * processing continues with the remaining rules
+  */
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (__ipa_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
+
+/*
+ * __ipa_del_rt_rule() - delete a single routing rule by handle; removing
+ * the last rule of an unreferenced table deletes the table as well;
+ * caller holds ipa_ctx->lock.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int __ipa_del_rt_rule(u32 rule_hdl)
+{
+ struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
+ struct ipa_tree_node *node;
+
+ /* cookie guards against stale/forged handles */
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ /* drop the pin on the referenced header, if any */
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+ entry->tbl->rule_cnt);
+ if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry->tbl))
+ IPAERR("fail to del RT tbl\n");
+ }
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int i;
+ int ret;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ /* per-handle failures are reported via each entry's status field */
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del rt rule %i\n", i);
+ hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (__ipa_commit_rt(hdls->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+ /*
+ * issue a commit on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_commit_flt(ip))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (__ipa_commit_rt(ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *tbl_next;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_entry *rule;
+ struct ipa_rt_entry *rule_next;
+ struct ipa_tree_node *node;
+ struct ipa_rt_tbl_set *rset;
+
+ /*
+ * issue a reset on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_reset_flt(ip))
+ IPAERR("fail to reset flt ip=%d\n", ip);
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ rset = &ipa_ctx->reap_rt_tbl_set[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset rt ip=%d\n", ip);
+ list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ list_for_each_entry_safe(rule, rule_next,
+ &tbl->head_rt_rule_list, link) {
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
+ (u32)rule);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /*
+ * for the "default" routing tbl, remove all but the
+ * last rule
+ */
+ if (tbl->idx == 0 && tbl->rule_cnt == 1)
+ continue;
+
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ rule->hdr->ref_cnt--;
+ rule->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /* do not remove the "default" routing tbl which has index 0 */
+ if (tbl->idx != 0) {
+ if (!tbl->in_sys) {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ } else {
+ list_move(&tbl->link, &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ }
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
+ * exists, if lookup succeeds the routing table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ struct ipa_rt_tbl *entry;
+ int result = -EFAULT;
+
+ if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
+ if (entry && entry->cookie == IPA_COOKIE) {
+ entry->ref_cnt++;
+ lookup->hdl = (uint32_t)entry;
+
+ /* commit for get */
+ if (__ipa_commit_rt(lookup->ip))
+ IPAERR("fail to commit RT tbl\n");
+
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE) ||
+ entry->ref_cnt == 0) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ mutex_lock(&ipa_ctx->lock);
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry))
+ IPAERR("fail to del RT tbl\n");
+ /* commit for put */
+ if (__ipa_commit_rt(ip))
+ IPAERR("fail to commit RT tbl\n");
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
new file mode 100644
index 0000000..d5d5566
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h> /* gen_pool_alloc() */
+#include <linux/io.h>
+#include "ipa_i.h"
+
+/*
+ * Candidate HW equation slots per equation class; each list is terminated
+ * by -1 so the rule generator can detect running out of slots.
+ */
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+	IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+	IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+	IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+	IPA_IHL_OFFSET_MEQ32_1, -1 };
+
+/*
+ * Pipe index per (operating mode, client type); -1 means the client has no
+ * pipe in that mode. Rows are indexed by enum ipa_operating_mode, columns
+ * by enum ipa_client_type.
+ */
+static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+};
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST,
+ IPA_SETFIELD(route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK) |
+ IPA_SETFIELD(route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+
+ return 0;
+}
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_FILTER_OFST,
+ IPA_SETFIELD(!disable,
+ IPA_FILTER_FILTER_EN_SHFT,
+ IPA_FILTER_FILTER_EN_BMSK));
+ return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+ u32 ipa_version = 0;
+
+ /* do soft reset of IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+ /* enable IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+ /* Read IPA version and make sure we have access to the registers */
+ ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+ if (ipa_version == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @mode: IPA operating mode
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client)
+{
+ return ep_mapping[mode][client];
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+ *dest++ = (u8)((hw) & 0xFF);
+ *dest++ = (u8)((hw >> 8) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @hw: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+ *dest++ = (b) & 0xFF;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: padded value
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+ int i = (u32)dest & 0x3;
+ int j;
+
+ if (i)
+ for (j = 0; j < (4 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+
+ if (ip == IPA_IP_v4) {
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+ IPA_FLT_FLOW_LABEL) {
+ IPAERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_TOS_EQ;
+ *buf = ipa_write_8(attrib->u.v4.tos, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 12 => offset of src ip in v4 header */
+ *buf = ipa_write_8(12, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 16 => offset of dst ip in v4 header */
+ *buf = ipa_write_8(16, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v4 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v4 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IPV4_IS_FRAG;
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else if (ip == IPA_IP_v6) {
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL ||
+ attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ IPAERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v6 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v6 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 8 => offset of src ip in v6 header */
+ *buf = ipa_write_8(8, *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 24 => offset of dst ip in v6 header */
+ *buf = ipa_write_8(24, *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_FLT_TC;
+ *buf = ipa_write_8(attrib->u.v6.tc, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_FLT_FLOW_LABEL;
+ /* FIXME FL is only 20 bits */
+ *buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else {
+ IPAERR("unsupported ip %d\n", ip);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ *buf = ipa_write_8(0, *buf); /* offset */
+ *buf = ipa_write_32(0, *buf); /* mask */
+ *buf = ipa_write_32(0, *buf); /* val */
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+ result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+ /* clnt_hdl is used as pipe_index */
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ u32 val;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
+ ipa_ctx->ep[clnt_hdl].dst_pipe_index = ipa_get_ep_mapping(ipa_ctx->mode,
+ ipa_ep_cfg->dst);
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
+ IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * if DMA mode was configured previously for this EP, return with
+ * success
+ */
+ if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+ IPADBG("DMA mode for EP %d\n", clnt_hdl);
+ return 0;
+ }
+
+ if (ipa_ep_cfg->rt_tbl_hdl)
+ IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+ /* always use the "default" routing tables whose indices are 0 */
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+ int i;
+ u32 *cur = (u32 *)base;
+ u8 *byt;
+ IPADBG("START phys=%x\n", phy_base);
+ for (i = 0; i < size / 4; i++) {
+ byt = (u8 *)(cur + i);
+ IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
+ byt[0], byt[1], byt[2], byt[3]);
+ }
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_dump() - dumps part of driver data structures for debug purposes
+ */
+void ipa_dump(void)
+{
+ struct ipa_mem_buffer hdr_mem = { 0 };
+ struct ipa_mem_buffer rt_mem = { 0 };
+ struct ipa_mem_buffer flt_mem = { 0 };
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_generate_hdr_hw_tbl(&hdr_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
+ IPAERR("fail\n");
+ IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base, rt_mem.phys_base,
+ flt_mem.phys_base);
+ IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
+ (u32)rt_mem.base, (u32)flt_mem.base);
+ IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
+ flt_mem.size);
+ IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+ IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
+ IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);
+ if (hdr_mem.phys_base)
+ dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
+ hdr_mem.phys_base);
+ if (rt_mem.phys_base)
+ dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
+ rt_mem.phys_base);
+ if (flt_mem.phys_base)
+ dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
+ flt_mem.phys_base);
+ mutex_unlock(&ipa_ctx->lock);
+}
+
+/*
+ * TODO: add swap if needed, for now assume LE is ok for device memory
+ * even though IPA registers are assumed to be BE
+ */
+/**
+ * ipa_write_dev_8() - writes 8 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram)
+{
+ iowrite8(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_16() - writes 16 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ *
+ */
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram)
+{
+ iowrite16(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_32() - writes 32 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram)
+{
+ iowrite32(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_8() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram)
+{
+ return ioread8((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_16() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram)
+{
+ return ioread16((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_32() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram)
+{
+ return ioread32((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
/**
 * ipa_write_dev_8rep() - write a buffer to IPA local SRAM using repeated
 * 8 bit writes
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to write to
 * @buf: source buffer
 * @count: number of 8 bit transfers to perform
 */
void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf, unsigned long count)
{
	iowrite8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
			count);
}
+
/**
 * ipa_write_dev_16rep() - write a buffer to IPA local SRAM using repeated
 * 16 bit writes
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to write to
 * @buf: source buffer
 * @count: number of 16 bit transfers to perform
 */
void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
		unsigned long count)
{
	iowrite16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
			buf, count);
}
+
/**
 * ipa_write_dev_32rep() - write a buffer to IPA local SRAM using repeated
 * 32 bit writes
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to write to
 * @buf: source buffer
 * @count: number of 32 bit transfers to perform
 */
void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
		unsigned long count)
{
	iowrite32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
			buf, count);
}
+
/**
 * ipa_read_dev_8rep() - read from IPA local SRAM into a buffer using
 * repeated 8 bit reads
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to read from
 * @buf: destination buffer
 * @count: number of 8 bit transfers to perform
 */
void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
{
	ioread8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
			count);
}
+
/**
 * ipa_read_dev_16rep() - read from IPA local SRAM into a buffer using
 * repeated 16 bit reads
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to read from
 * @buf: destination buffer
 * @count: number of 16 bit transfers to perform
 */
void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
{
	ioread16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
			count);
}
+
/**
 * ipa_read_dev_32rep() - read from IPA local SRAM into a buffer using
 * repeated 32 bit reads
 * @ofst_ipa_sram: byte offset within the IPA SRAM region to read from
 * @buf: destination buffer
 * @count: number of 32 bit transfers to perform
 */
void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
{
	ioread32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
			count);
}
+
+/**
+ * ipa_memset_dev() - memset IO
+ * @ofst_ipa_sram: address to set
+ * @value: value
+ * @count: number of bytes to set
+ */
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count)
+{
+ memset_io((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), value,
+ count);
+}
+
+/**
+ * ipa_memcpy_from_dev() - copy memory from device
+ * @dest: buffer to copy to
+ * @ofst_ipa_sram: address
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count)
+{
+ memcpy_fromio(dest, (void *)((u32)ipa_ctx->mmio + 0x4000 +
+ ofst_ipa_sram), count);
+}
+
+/**
+ * ipa_memcpy_to_dev() - copy memory to device
+ * @ofst_ipa_sram: address
+ * @source: buffer to copy from
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count)
+{
+ memcpy_toio((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ source, count);
+}
+
/**
 * ipa_defrag() - handle de-frag for bridging type of cases
 * @skb: skb
 *
 * Return value:
 *	0: success - skb is not fragmented, or reassembly completed
 *	-EINPROGRESS: skb was consumed into the reassembly queue; more
 *	fragments are needed before a full packet is available
 */
int ipa_defrag(struct sk_buff *skb)
{
	/*
	 * Reassemble IP fragments. TODO: need to setup network_header to
	 * point to start of IP header
	 */
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		if (ip_defrag(skb, IP_DEFRAG_CONNTRACK_IN))
			return -EINPROGRESS;
	}

	/* skb is fully assembled (or was never fragmented), send it out */
	return 0;
}
+
+/**
+ * ipa_search() - search for handle in RB tree
+ * @root: tree root
+ * @hdl: handle
+ *
+ * Return value: tree node corresponding to the handle
+ */
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct ipa_tree_node *data = container_of(node,
+ struct ipa_tree_node, node);
+
+ if (hdl < data->hdl)
+ node = node->rb_left;
+ else if (hdl > data->hdl)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+/**
+ * ipa_insert() - insert new node to RB tree
+ * @root: tree root
+ * @data: new data to insert
+ *
+ * Return value:
+ * 0: success
+ * -EPERM: tree already contains the node with provided handle
+ */
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct ipa_tree_node *this = container_of(*new,
+ struct ipa_tree_node, node);
+
+ parent = *new;
+ if (data->hdl < this->hdl)
+ new = &((*new)->rb_left);
+ else if (data->hdl > this->hdl)
+ new = &((*new)->rb_right);
+ else
+ return -EPERM;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return 0;
+}
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+ int res;
+ u32 aligned_start_ofst;
+ u32 aligned_size;
+ struct gen_pool *pool;
+
+ if (!size) {
+ IPAERR("no IPA pipe mem alloted\n");
+ goto fail;
+ }
+
+ aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+ aligned_size = size - (aligned_start_ofst - start_ofst);
+
+ IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+ start_ofst, aligned_start_ofst, size, aligned_size);
+
+ /* allocation order of 8 i.e. 128 bytes, global pool */
+ pool = gen_pool_create(8, -1);
+ if (!pool) {
+ IPAERR("Failed to create a new memory pool.\n");
+ goto fail;
+ }
+
+ res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+ if (res) {
+ IPAERR("Failed to add memory to IPA pipe pool\n");
+ goto err_pool_add;
+ }
+
+ ipa_ctx->pipe_mem_pool = pool;
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(pool);
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+ u32 vaddr;
+ int res = -1;
+
+ if (!ipa_ctx->pipe_mem_pool || !size) {
+ IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+ ipa_ctx->pipe_mem_pool);
+ return res;
+ }
+
+ vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+ if (vaddr) {
+ *ofst = vaddr;
+ res = 0;
+ IPADBG("size=%u ofst=%u\n", size, vaddr);
+ } else {
+ IPAERR("size=%u failed\n", size);
+ }
+
+ return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+ IPADBG("size=%u ofst=%u\n", size, ofst);
+ if (ipa_ctx->pipe_mem_pool && size)
+ gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+ return 0;
+}
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
+ * etc
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST,
+ ((mode & IPA_AGGREGATION_MODE_MSK) <<
+ IPA_AGGREGATION_MODE_SHFT) |
+ (reg_val & IPA_AGGREGATION_MODE_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ u32 reg_val;
+
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+ return -EINVAL;
+ }
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
+ IPA_AGGREGATION_QCNCM_SIG0_SHFT |
+ (sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
+ sig[2] | (reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
+ IPA_AGGREGATION_SINGLE_NDP_MSK) |
+ (reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+ u32 next_start;
+ u32 prev_end;
+
+ IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+ next_start = (start + (boundary - 1)) & ~(boundary - 1);
+ prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+ while (next_start < prev_end)
+ next_start += boundary;
+
+ if (next_start == prev_end)
+ return 1;
+ else
+ return 0;
+}
+
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
new file mode 100644
index 0000000..3c7f5ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "a2_service.h"
+#include "ipa_i.h"
+
/* runtime state of the RmNet-IPA bridge (single global instance) */
static struct rmnet_bridge_cb_type {
	u32 producer_handle;	/* IPA producer client handle from caller */
	u32 consumer_handle;	/* IPA consumer client handle from caller */
	bool is_connected;	/* true once rmnet_bridge_connect() succeeds */
} rmnet_bridge_cb;
+
+/**
+* rmnet_bridge_init() - Initialize RmNet bridge module
+*
+* Return codes:
+* 0: success
+*/
+int rmnet_bridge_init(void)
+{
+ memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
+
+ return 0;
+}
+EXPORT_SYMBOL(rmnet_bridge_init);
+
+/**
+* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_disconnect(void)
+{
+ int ret = 0;
+ if (false == rmnet_bridge_cb.is_connected) {
+ pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.is_connected = false;
+
+ ret = ipa_bridge_teardown(IPA_DL);
+ ret = ipa_bridge_teardown(IPA_UL);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_disconnect);
+
+/**
+* rmnet_bridge_connect() - Connect RmNet bridge module
+* @producer_hdl: IPA producer handle
+* @consumer_hdl: IPA consumer handle
+* @wwan_logical_channel_id: WWAN logical channel ID
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id)
+{
+ int ret = 0;
+
+ if (true == rmnet_bridge_cb.is_connected) {
+ ret = 0;
+ pr_err("%s: trying to connect already connected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.consumer_handle = consumer_hdl;
+ rmnet_bridge_cb.producer_handle = producer_hdl;
+ rmnet_bridge_cb.is_connected = true;
+
+ ret = ipa_bridge_setup(IPA_DL);
+ if (ret) {
+ pr_err("%s: IPA DL bridge setup failure\n", __func__);
+ goto bail_dl;
+ }
+ ret = ipa_bridge_setup(IPA_UL);
+ if (ret) {
+ pr_err("%s: IPA UL bridge setup failure\n", __func__);
+ goto bail_ul;
+ }
+ return 0;
+bail_ul:
+ ipa_bridge_teardown(IPA_DL);
+bail_dl:
+ rmnet_bridge_cb.is_connected = false;
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_connect);
+
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle)
+{
+ if (producer_handle == NULL || consumer_handle == NULL)
+ return;
+
+ *producer_handle = rmnet_bridge_cb.producer_handle;
+ *consumer_handle = rmnet_bridge_cb.consumer_handle;
+}
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index f671ece..a3bbb73 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -1005,6 +1005,8 @@
no_queue = ((options & SPS_O_NO_Q));
ack_xfers = ((options & SPS_O_ACK_TRANSFERS));
+ pipe->hybrid = options & SPS_O_HYBRID;
+
/* Create interrupt source mask */
mask = 0;
for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
@@ -1773,7 +1775,7 @@
}
/* If pipe is polled and queue is enabled, perform polling operation */
- if (pipe->polled && !pipe->sys.no_queue)
+ if ((pipe->polled || pipe->hybrid) && !pipe->sys.no_queue)
pipe_handler_eot(dev, pipe);
/* Is there a completed descriptor? */
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
index 6004b75..84d2b97 100644
--- a/drivers/platform/msm/sps/sps_bam.h
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -170,6 +170,7 @@
u32 pipe_index_mask;
u32 irq_mask;
int polled;
+ int hybrid;
u32 irq_gen_addr;
enum sps_mode mode;
u32 num_descs; /* Size (number of elements) of descriptor FIFO */
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index bf78b1c..8f97531 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -23,11 +23,12 @@
#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>
#include <mach/sps.h>
+#include <mach/ipa.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#define USB_SUMMING_THRESHOLD 512
-#define CONNECTIONS_NUM 4
+#define CONNECTIONS_NUM 8
static struct sps_bam_props usb_props;
static struct sps_pipe *sps_pipes[CONNECTIONS_NUM][2];
@@ -49,7 +50,8 @@
u32 *src_pipe;
u32 *dst_pipe;
struct usb_bam_wake_event_info peer_event;
- bool enabled;
+ bool src_enabled;
+ bool dst_enabled;
};
static struct usb_bam_connect_info usb_bam_connections[CONNECTIONS_NUM];
@@ -206,7 +208,7 @@
ret = sps_connect(*pipe, connection);
if (ret < 0) {
- pr_err("%s: tx connect error %d\n", __func__, ret);
+ pr_err("%s: sps_connect failed %d\n", __func__, ret);
goto error;
}
return 0;
@@ -218,9 +220,119 @@
return ret;
}
+static int connect_pipe_ipa(
+ struct usb_bam_connect_ipa_params *connection_params)
+{
+ int ret;
+ u8 conn_idx = connection_params->idx;
+ enum usb_bam_pipe_dir pipe_dir = connection_params->dir;
+ struct sps_pipe **pipe = &sps_pipes[conn_idx][pipe_dir];
+ struct sps_connect *connection =
+ &sps_connections[conn_idx][pipe_dir];
+ struct msm_usb_bam_platform_data *pdata =
+ usb_bam_pdev->dev.platform_data;
+ struct usb_bam_pipe_connect *pipe_connection =
+ &msm_usb_bam_connections_info
+ [pdata->usb_active_bam][conn_idx][pipe_dir];
-static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
- u32 *usb_pipe_idx)
+ struct ipa_connect_params ipa_in_params;
+ struct ipa_sps_params sps_out_params;
+ u32 usb_handle, usb_phy_addr;
+ u32 clnt_hdl = 0;
+
+ memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+ memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+ if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+ usb_phy_addr = pipe_connection->src_phy_addr;
+ ipa_in_params.client_ep_idx = pipe_connection->src_pipe_index;
+ } else {
+ usb_phy_addr = pipe_connection->dst_phy_addr;
+ ipa_in_params.client_ep_idx = pipe_connection->dst_pipe_index;
+ }
+ /* Get HSUSB / HSIC bam handle */
+ ret = sps_phy2h(usb_phy_addr, &usb_handle);
+ if (ret) {
+ pr_err("%s: sps_phy2h failed (HSUSB/HSIC BAM) %d\n",
+ __func__, ret);
+ goto get_config_failed;
+ }
+
+ /* IPA input parameters */
+ ipa_in_params.client_bam_hdl = usb_handle;
+ ipa_in_params.desc_fifo_sz = pipe_connection->desc_fifo_size;
+ ipa_in_params.data_fifo_sz = pipe_connection->data_fifo_size;
+ ipa_in_params.notify = connection_params->notify;
+ ipa_in_params.priv = connection_params->priv;
+ ipa_in_params.client = connection_params->client;
+ if (pipe_connection->mem_type != SYSTEM_MEM)
+ ipa_in_params.pipe_mem_preferred = true;
+
+ memcpy(&ipa_in_params.ipa_ep_cfg, &connection_params->ipa_ep_cfg,
+ sizeof(struct ipa_ep_cfg));
+
+ ret = ipa_connect(&ipa_in_params, &sps_out_params, &clnt_hdl);
+ if (ret) {
+ pr_err("%s: ipa_connect failed\n", __func__);
+ return ret;
+ }
+
+ *pipe = sps_alloc_endpoint();
+ if (*pipe == NULL) {
+ pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+ ret = -ENOMEM;
+ goto disconnect_ipa;
+ }
+
+ ret = sps_get_config(*pipe, connection);
+ if (ret) {
+ pr_err("%s: tx get config failed %d\n", __func__, ret);
+ goto get_config_failed;
+ }
+
+ if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+ /* USB src IPA dest */
+ connection->mode = SPS_MODE_SRC;
+ connection_params->cons_clnt_hdl = clnt_hdl;
+ connection->source = usb_handle;
+ connection->destination = sps_out_params.ipa_bam_hdl;
+ connection->src_pipe_index = pipe_connection->src_pipe_index;
+ connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
+ *(connection_params->src_pipe) = connection->src_pipe_index;
+ } else {
+ /* IPA src, USB dest */
+ connection->mode = SPS_MODE_DEST;
+ connection_params->prod_clnt_hdl = clnt_hdl;
+ connection->source = sps_out_params.ipa_bam_hdl;
+ connection->destination = usb_handle;
+ connection->src_pipe_index = sps_out_params.ipa_ep_idx;
+ connection->dest_pipe_index = pipe_connection->dst_pipe_index;
+ *(connection_params->dst_pipe) = connection->dest_pipe_index;
+ }
+
+ connection->data = sps_out_params.data;
+ connection->desc = sps_out_params.desc;
+ connection->event_thresh = 16;
+ connection->options = SPS_O_AUTO_ENABLE;
+
+ ret = sps_connect(*pipe, connection);
+ if (ret < 0) {
+ pr_err("%s: sps_connect failed %d\n", __func__, ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ sps_disconnect(*pipe);
+get_config_failed:
+ sps_free_endpoint(*pipe);
+disconnect_ipa:
+ ipa_disconnect(clnt_hdl);
+ return ret;
+}
+
+static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir)
{
struct msm_usb_bam_platform_data *pdata =
usb_bam_pdev->dev.platform_data;
@@ -269,7 +381,7 @@
return -EINVAL;
}
- if (connection->enabled) {
+ if (connection->src_enabled && connection->dst_enabled) {
pr_debug("%s: connection %d was already established\n",
__func__, idx);
return 0;
@@ -281,22 +393,66 @@
if (src_pipe_idx) {
/* open USB -> Peripheral pipe */
ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
- connection->src_pipe);
+ connection->src_pipe);
if (ret) {
pr_err("%s: src pipe connection failure\n", __func__);
return ret;
}
}
+ connection->src_enabled = 1;
+
if (dst_pipe_idx) {
/* open Peripheral -> USB pipe */
ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
- connection->dst_pipe);
+ connection->dst_pipe);
if (ret) {
pr_err("%s: dst pipe connection failure\n", __func__);
return ret;
}
}
- connection->enabled = 1;
+ connection->dst_enabled = 1;
+
+ return 0;
+}
+
+int usb_bam_connect_ipa(struct usb_bam_connect_ipa_params *ipa_params)
+{
+ u8 idx = ipa_params->idx;
+ struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+ int ret;
+
+ if (idx >= CONNECTIONS_NUM) {
+ pr_err("%s: Invalid connection index\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((connection->src_enabled &&
+ ipa_params->dir == USB_TO_PEER_PERIPHERAL) ||
+ (connection->dst_enabled &&
+ ipa_params->dir == PEER_PERIPHERAL_TO_USB)) {
+ pr_debug("%s: connection %d was already established\n",
+ __func__, idx);
+ return 0;
+ }
+
+ if (ipa_params->dir == USB_TO_PEER_PERIPHERAL)
+ connection->src_pipe = ipa_params->src_pipe;
+ else
+ connection->dst_pipe = ipa_params->dst_pipe;
+
+ connection->idx = idx;
+
+ ret = connect_pipe_ipa(ipa_params);
+ if (ret) {
+ pr_err("%s: dst pipe connection failure\n", __func__);
+ return ret;
+ }
+
+ if (ipa_params->dir == USB_TO_PEER_PERIPHERAL)
+ connection->src_enabled = 1;
+ else
+ connection->dst_enabled = 1;
return 0;
}
@@ -364,39 +520,78 @@
return -EINVAL;
}
- if (!connection->enabled) {
+ if (!connection->src_enabled && !connection->dst_enabled) {
pr_debug("%s: connection %d isn't enabled\n",
__func__, idx);
return 0;
}
- if (connection->src_pipe) {
+ if (connection->src_enabled) {
/* close USB -> Peripheral pipe */
- ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
- connection->src_pipe);
+ ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL);
if (ret) {
pr_err("%s: src pipe connection failure\n", __func__);
return ret;
}
-
+ connection->src_enabled = 0;
}
- if (connection->dst_pipe) {
+ if (connection->dst_enabled) {
/* close Peripheral -> USB pipe */
- ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
- connection->dst_pipe);
+ ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB);
if (ret) {
pr_err("%s: dst pipe connection failure\n", __func__);
return ret;
}
+ connection->dst_enabled = 0;
}
connection->src_pipe = 0;
connection->dst_pipe = 0;
- connection->enabled = 0;
return 0;
}
/**
 * usb_bam_disconnect_ipa() - disconnect the IPA side of connection @idx
 * @idx: connection index
 * @ipa_params: holds the IPA producer/consumer client handles to release
 *
 * Return codes:
 * 0: success
 * -ENODEV: usb_bam device not probed yet
 * -EINVAL: invalid connection index
 * other negative values: propagated from ipa_disconnect()
 */
int usb_bam_disconnect_ipa(u8 idx,
		struct usb_bam_connect_ipa_params *ipa_params)
{
	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
	int ret;
	if (!usb_bam_pdev) {
		pr_err("%s: usb_bam device not found\n", __func__);
		return -ENODEV;
	}

	if (idx >= CONNECTIONS_NUM) {
		pr_err("%s: Invalid connection index\n",
			__func__);
		return -EINVAL;
	}

	/* Currently just calls ipa_disconnect, no sps pipes
	   disconnection support */

	/* close IPA -> USB pipe */
	if (connection->dst_pipe) {
		ret = ipa_disconnect(ipa_params->prod_clnt_hdl);
		if (ret) {
			pr_err("%s: dst pipe disconnection failure\n",
				__func__);
			return ret;
		}
	}
	/* close USB -> IPA pipe */
	if (connection->src_pipe) {
		ret = ipa_disconnect(ipa_params->cons_clnt_hdl);
		if (ret) {
			pr_err("%s: src pipe disconnection failure\n",
				__func__);
			return ret;
		}
	}

	/*
	 * NOTE(review): src_enabled/dst_enabled are not cleared here, unlike
	 * usb_bam_disconnect_pipe() - confirm whether that is intentional.
	 */
	return 0;

}
static int update_connections_info(struct device_node *node, int bam,
int conn_num, int dir, enum usb_pipe_mem_type mem_type)
{
@@ -412,33 +607,28 @@
key = "qcom,src-bam-physical-address";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->src_phy_addr = val;
+ if (!rc)
+ pipe_connection->src_phy_addr = val;
key = "qcom,src-bam-pipe-index";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->src_pipe_index = val;
+ if (!rc)
+ pipe_connection->src_pipe_index = val;
key = "qcom,dst-bam-physical-address";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->dst_phy_addr = val;
+ if (!rc)
+ pipe_connection->dst_phy_addr = val;
key = "qcom,dst-bam-pipe-index";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->dst_pipe_index = val;
+ if (!rc)
+ pipe_connection->dst_pipe_index = val;
key = "qcom,data-fifo-offset";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->data_fifo_base_offset = val;
+ if (!rc)
+ pipe_connection->data_fifo_base_offset = val;
key = "qcom,data-fifo-size";
rc = of_property_read_u32(node, key, &val);
@@ -448,9 +638,8 @@
key = "qcom,descriptor-fifo-offset";
rc = of_property_read_u32(node, key, &val);
- if (rc)
- goto err;
- pipe_connection->desc_fifo_base_offset = val;
+ if (!rc)
+ pipe_connection->desc_fifo_base_offset = val;
key = "qcom,descriptor-fifo-size";
rc = of_property_read_u32(node, key, &val);
@@ -539,10 +728,8 @@
rc = of_property_read_u32(node, "qcom,usb-base-address",
&pdata->usb_base_address);
- if (rc) {
- pr_err("Invalid usb base address property\n");
- return NULL;
- }
+ if (rc)
+ pr_debug("%s: Invalid usb base address property\n", __func__);
pdata->ignore_core_reset_ack = of_property_read_bool(node,
"qcom,ignore-core-reset-ack");
@@ -588,22 +775,28 @@
if (rc)
goto err;
+ if (mem_type == USB_PRIVATE_MEM &&
+ !pdata->usb_base_address)
+ goto err;
+
rc = of_property_read_string(node, "label", &str);
if (rc) {
pr_err("Cannot read string\n");
goto err;
}
- if (strstr(str, "usb-to-peri"))
+ if (strnstr(str, "usb-to", 30))
dir = USB_TO_PEER_PERIPHERAL;
- else if (strstr(str, "peri-to-usb"))
+ else if (strnstr(str, "to-usb", 30))
dir = PEER_PERIPHERAL_TO_USB;
else
goto err;
- /* Check if connection type is suported */
+ /* Check if connection type is supported */
if (!strcmp(str, "usb-to-peri-qdss-dwc3") ||
!strcmp(str, "peri-to-usb-qdss-dwc3") ||
+ !strcmp(str, "usb-to-ipa") ||
+ !strcmp(str, "ipa-to-usb") ||
!strcmp(str, "usb-to-peri-qdss-hsusb") ||
!strcmp(str, "peri-to-usb-qdss-hsusb"))
conn_num = 0;
@@ -772,7 +965,8 @@
dev_dbg(&pdev->dev, "usb_bam_probe\n");
for (i = 0; i < CONNECTIONS_NUM; i++) {
- usb_bam_connections[i].enabled = 0;
+ usb_bam_connections[i].src_enabled = 0;
+ usb_bam_connections[i].dst_enabled = 0;
INIT_WORK(&usb_bam_connections[i].peer_event.wake_w,
usb_bam_wake_work);
}
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 703aca9..b193810 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -151,6 +151,9 @@
struct power_supply *batt_psy;
bool low_voltage_wake_lock_held;
struct wake_lock low_voltage_wake_lock;
+ int soc_calc_period;
+ int normal_voltage_calc_ms;
+ int low_voltage_calc_ms;
};
/*
@@ -1370,6 +1373,7 @@
pr_debug("voltage = %d low holding wakelock\n", vbat_uv);
wake_lock(&chip->low_voltage_wake_lock);
chip->low_voltage_wake_lock_held = 1;
+ chip->soc_calc_period = chip->low_voltage_calc_ms;
}
if (vbat_uv > (chip->v_cutoff + 20) * 1000
@@ -1377,6 +1381,7 @@
pr_debug("voltage = %d releasing wakelock\n", vbat_uv);
chip->low_voltage_wake_lock_held = 0;
wake_unlock(&chip->low_voltage_wake_lock);
+ chip->soc_calc_period = chip->normal_voltage_calc_ms;
}
}
@@ -1811,7 +1816,6 @@
return calculated_soc;
}
-#define CALCULATE_SOC_MS 20000
static void calculate_soc_work(struct work_struct *work)
{
struct pm8921_bms_chip *chip = container_of(work,
@@ -1841,7 +1845,7 @@
schedule_delayed_work(&chip->calculate_soc_delayed_work,
round_jiffies_relative(msecs_to_jiffies
- (CALCULATE_SOC_MS)));
+ (chip->soc_calc_period)));
}
static int report_state_of_charge(struct pm8921_bms_chip *chip)
@@ -2686,7 +2690,9 @@
int ret = 0;
struct pm8921_soc_params raw;
+ mutex_lock(&the_chip->bms_output_lock);
read_soc_params_raw(the_chip, &raw);
+ mutex_unlock(&the_chip->bms_output_lock);
*val = 0;
@@ -2907,6 +2913,12 @@
chip->end_percent = -EINVAL;
chip->shutdown_soc_valid_limit = pdata->shutdown_soc_valid_limit;
chip->adjust_soc_low_threshold = pdata->adjust_soc_low_threshold;
+
+ chip->normal_voltage_calc_ms = pdata->normal_voltage_calc_ms;
+ chip->low_voltage_calc_ms = pdata->low_voltage_calc_ms;
+
+ chip->soc_calc_period = pdata->normal_voltage_calc_ms;
+
if (chip->adjust_soc_low_threshold >= 45)
chip->adjust_soc_low_threshold = 45;
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index cb6b23e..94ef4f9 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -260,11 +260,9 @@
struct dentry *dent;
struct bms_notify bms_notify;
int *usb_trim_table;
- bool keep_btm_on_suspend;
bool ext_charging;
bool ext_charge_done;
bool iusb_fine_res;
- bool dc_unplug_check;
bool disable_hw_clock_switching;
DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
struct work_struct battery_id_valid_work;
@@ -291,6 +289,7 @@
u8 active_path;
int recent_reported_soc;
int battery_less_hardware;
+ int ibatmax_max_adj_ma;
};
/* user space parameter to limit usb current */
@@ -306,8 +305,6 @@
static struct pm8921_chg_chip *the_chip;
-static struct pm8xxx_adc_arb_btm_param btm_config;
-
static int pm_chg_masked_write(struct pm8921_chg_chip *chip, u16 addr,
u8 mask, u8 val)
{
@@ -635,10 +632,26 @@
}
#define PM8921_CHG_IBATMAX_MIN 325
-#define PM8921_CHG_IBATMAX_MAX 2000
+#define PM8921_CHG_IBATMAX_MAX 3025
#define PM8921_CHG_I_MIN_MA 225
#define PM8921_CHG_I_STEP_MA 50
#define PM8921_CHG_I_MASK 0x3F
/*
 * pm_chg_ibatmax_get() - read back the configured maximum battery charge
 * current (mA) from the CHG_IBAT_MAX register. On read failure *ibat_ma is
 * zeroed and the pm8xxx_readb() error code is returned.
 */
static int pm_chg_ibatmax_get(struct pm8921_chg_chip *chip, int *ibat_ma)
{
	u8 temp;
	int rc;

	rc = pm8xxx_readb(chip->dev->parent, CHG_IBAT_MAX, &temp);
	if (rc) {
		pr_err("rc = %d while reading ibat max\n", rc);
		*ibat_ma = 0;
		return rc;
	}
	/* register field encodes (mA - PM8921_CHG_I_MIN_MA) / STEP */
	*ibat_ma = (int)(temp & PM8921_CHG_I_MASK) * PM8921_CHG_I_STEP_MA
		+ PM8921_CHG_I_MIN_MA;
	return 0;
}
+
static int pm_chg_ibatmax_set(struct pm8921_chg_chip *chip, int chg_current)
{
u8 temp;
@@ -930,6 +943,11 @@
break;
}
+ if (i < 0) {
+ pr_err("can't find %d in usb_ma_table. Use min.\n", temp);
+ i = 0;
+ }
+
*mA = usb_ma_table[i].usb_ma;
return rc;
@@ -1109,31 +1127,6 @@
PM8921_CHG_LED_SRC_CONFIG_MASK, temp);
}
-static void enable_input_voltage_regulation(struct pm8921_chg_chip *chip)
-{
- u8 temp;
- int rc;
-
- rc = pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0x70);
- if (rc) {
- pr_err("Failed to write 0x70 to CTRL_TEST3 rc = %d\n", rc);
- return;
- }
- rc = pm8xxx_readb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, &temp);
- if (rc) {
- pr_err("Failed to read CTRL_TEST3 rc = %d\n", rc);
- return;
- }
- /* unset the input voltage disable bit */
- temp &= 0xFE;
- /* set the write bit */
- temp |= 0x80;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, temp);
- if (rc) {
- pr_err("Failed to write 0x%x to CTRL_TEST3 rc=%d\n", temp, rc);
- return;
- }
-}
static int64_t read_battery_id(struct pm8921_chg_chip *chip)
{
@@ -1434,15 +1427,6 @@
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_ONLINE:
val->intval = 0;
- if (charging_disabled)
- return 0;
-
- /*
- * if drawing any current from usb is disabled behave
- * as if no usb cable is connected
- */
- if (pm_is_chg_charge_dis(the_chip))
- return 0;
/* USB charging */
if (usb_target_ma < USB_WALL_THRESHOLD_MA)
@@ -1517,6 +1501,34 @@
return pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ);
}
/*
 * get_prop_batt_status() - map the charger state to a POWER_SUPPLY_STATUS_*
 * value. External charger state (if an ext_psy is registered) takes
 * precedence; otherwise the charger FSM state is translated via the map[]
 * table, with a final RT-status check demoting HIGHI_1 to NOT_CHARGING when
 * the battery is absent, over temperature, or VBAT is below the detect level.
 */
static int get_prop_batt_status(struct pm8921_chg_chip *chip)
{
	int batt_state = POWER_SUPPLY_STATUS_DISCHARGING;
	int fsm_state = pm_chg_get_fsm_state(chip);
	int i;

	if (chip->ext_psy) {
		if (chip->ext_charge_done)
			return POWER_SUPPLY_STATUS_FULL;
		if (chip->ext_charging)
			return POWER_SUPPLY_STATUS_CHARGING;
	}

	/* translate the FSM state via the lookup table */
	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].fsm_state == fsm_state)
			batt_state = map[i].batt_state;

	if (fsm_state == FSM_STATE_ON_CHG_HIGHI_1) {
		if (!pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ)
			|| !pm_chg_get_rt_status(chip, BAT_TEMP_OK_IRQ)
			|| pm_chg_get_rt_status(chip, CHGHOT_IRQ)
			|| pm_chg_get_rt_status(chip, VBATDET_LOW_IRQ))

			batt_state = POWER_SUPPLY_STATUS_NOT_CHARGING;
	}
	return batt_state;
}
+
static int get_prop_batt_capacity(struct pm8921_chg_chip *chip)
{
int percent_soc;
@@ -1536,8 +1548,8 @@
pr_warn_ratelimited("low battery charge = %d%%\n",
percent_soc);
- if (chip->recent_reported_soc == (chip->resume_charge_percent + 1)
- && percent_soc == chip->resume_charge_percent) {
+ if (percent_soc <= chip->resume_charge_percent
+ && get_prop_batt_status(chip) == POWER_SUPPLY_STATUS_FULL) {
pr_debug("soc fell below %d. charging enabled.\n",
chip->resume_charge_percent);
if (chip->is_bat_warm)
@@ -1666,34 +1678,6 @@
return POWER_SUPPLY_CHARGE_TYPE_NONE;
}
-static int get_prop_batt_status(struct pm8921_chg_chip *chip)
-{
- int batt_state = POWER_SUPPLY_STATUS_DISCHARGING;
- int fsm_state = pm_chg_get_fsm_state(chip);
- int i;
-
- if (chip->ext_psy) {
- if (chip->ext_charge_done)
- return POWER_SUPPLY_STATUS_FULL;
- if (chip->ext_charging)
- return POWER_SUPPLY_STATUS_CHARGING;
- }
-
- for (i = 0; i < ARRAY_SIZE(map); i++)
- if (map[i].fsm_state == fsm_state)
- batt_state = map[i].batt_state;
-
- if (fsm_state == FSM_STATE_ON_CHG_HIGHI_1) {
- if (!pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ)
- || !pm_chg_get_rt_status(chip, BAT_TEMP_OK_IRQ)
- || pm_chg_get_rt_status(chip, CHGHOT_IRQ)
- || pm_chg_get_rt_status(chip, VBATDET_LOW_IRQ))
-
- batt_state = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
- return batt_state;
-}
-
#define MAX_TOLERABLE_BATT_TEMP_DDC 680
static int get_prop_batt_temp(struct pm8921_chg_chip *chip)
{
@@ -1815,7 +1799,7 @@
return;
}
- if (mA >= 0 && mA <= 2) {
+ if (mA <= 2) {
usb_chg_current = 0;
rc = pm_chg_iusbmax_set(the_chip, 0);
if (rc) {
@@ -1833,6 +1817,12 @@
break;
}
+ if (i < 0) {
+ pr_err("can't find %dmA in usb_ma_table. Use min.\n",
+ mA);
+ i = 0;
+ }
+
/* Check if IUSB_FINE_RES is available */
while ((usb_ma_table[i].value & PM8917_IUSB_FINE_RES)
&& !the_chip->iusb_fine_res)
@@ -1902,22 +1892,6 @@
}
EXPORT_SYMBOL_GPL(pm8921_charger_vbus_draw);
-int pm8921_charger_enable(bool enable)
-{
- int rc;
-
- if (!the_chip) {
- pr_err("called before init\n");
- return -EINVAL;
- }
- enable = !!enable;
- rc = pm_chg_auto_enable(the_chip, enable);
- if (rc)
- pr_err("Failed rc=%d\n", rc);
- return rc;
-}
-EXPORT_SYMBOL(pm8921_charger_enable);
-
int pm8921_is_usb_chg_plugged_in(void)
{
if (!the_chip) {
@@ -2248,7 +2222,6 @@
usb_target_ma = 0;
pm8921_chg_disable_irq(chip, CHG_GONE_IRQ);
}
- enable_input_voltage_regulation(chip);
bms_notify_check(chip);
}
@@ -2310,7 +2283,10 @@
pr_warn("%s. battery temperature not ok.\n", __func__);
return;
}
- pm8921_disable_source_current(true); /* Force BATFET=ON */
+
+ /* Force BATFET=ON */
+ pm8921_disable_source_current(true);
+
vbat_ov = pm_chg_get_rt_status(chip, VBAT_OV_IRQ);
if (vbat_ov) {
pr_warn("%s. battery over voltage.\n", __func__);
@@ -2320,16 +2296,17 @@
schedule_delayed_work(&chip->unplug_check_work,
round_jiffies_relative(msecs_to_jiffies
(UNPLUG_CHECK_WAIT_PERIOD_MS)));
- pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
power_supply_set_online(chip->ext_psy, dc_present);
power_supply_set_charge_type(chip->ext_psy,
POWER_SUPPLY_CHARGE_TYPE_FAST);
- power_supply_changed(&chip->dc_psy);
chip->ext_charging = true;
chip->ext_charge_done = false;
bms_notify_check(chip);
- /* Start BMS */
+ /*
+ * since we wont get a fastchg irq from external charger
+ * use eoc worker to detect end of charging
+ */
schedule_delayed_work(&chip->eoc_work, delay);
wake_lock(&chip->eoc_wake_lock);
/* Update battery charging LEDs and user space battery info */
@@ -2530,6 +2507,13 @@
while (!the_chip->iusb_fine_res && i > 0
&& (usb_ma_table[i].value & PM8917_IUSB_FINE_RES))
i--;
+
+ if (i < 0) {
+ pr_err("can't find %dmA in usb_ma_table. Use min.\n",
+ *value);
+ i = 0;
+ }
+
*value = usb_ma_table[i].usb_ma;
}
}
@@ -2792,12 +2776,6 @@
}
} else if (active_path & DC_ACTIVE_BIT) {
pr_debug("DC charger active\n");
- /*
- * Some board designs are not prone to reverse boost on DC
- * charging path
- */
- if (!chip->dc_unplug_check)
- return;
} else {
/* No charger active */
if (!(is_usb_chg_plugged_in(chip)
@@ -2891,6 +2869,30 @@
return IRQ_HANDLED;
}
+struct ibatmax_max_adj_entry {
+ int ibat_max_ma;
+ int max_adj_ma;
+};
+
+static struct ibatmax_max_adj_entry ibatmax_adj_table[] = {
+ {975, 300},
+ {1475, 150},
+ {1975, 200},
+ {2475, 250},
+};
+
+static int find_ibat_max_adj_ma(int ibat_target_ma)
+{
+ int i = 0;
+
+ for (i = ARRAY_SIZE(ibatmax_adj_table) - 1; i >= 0; i--) {
+ if (ibat_target_ma <= ibatmax_adj_table[i].ibat_max_ma)
+ break;
+ }
+
+ return ibatmax_adj_table[i].max_adj_ma;
+}
+
static irqreturn_t fastchg_irq_handler(int irq, void *data)
{
struct pm8921_chg_chip *chip = data;
@@ -3046,20 +3048,33 @@
struct pm8921_chg_chip *chip = data;
int dc_present;
+ pm_chg_failed_clear(chip, 1);
dc_present = pm_chg_get_rt_status(chip, DCIN_VALID_IRQ);
- if (chip->ext_psy)
- power_supply_set_online(chip->ext_psy, dc_present);
- chip->dc_present = dc_present;
- if (dc_present)
- handle_start_ext_chg(chip);
- else
- handle_stop_ext_chg(chip);
- if (!chip->ext_psy) {
+ if (chip->dc_present ^ dc_present)
+ pm8921_bms_calibrate_hkadc();
+
+ if (dc_present)
+ pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
+ else
+ pm8921_chg_disable_irq(chip, CHG_GONE_IRQ);
+
+ chip->dc_present = dc_present;
+
+ if (chip->ext_psy) {
+ if (dc_present)
+ handle_start_ext_chg(chip);
+ else
+ handle_stop_ext_chg(chip);
+ } else {
+ if (dc_present)
+ schedule_delayed_work(&chip->unplug_check_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (UNPLUG_CHECK_WAIT_PERIOD_MS)));
power_supply_changed(&chip->dc_psy);
- power_supply_changed(&chip->batt_psy);
}
+ power_supply_changed(&chip->batt_psy);
return IRQ_HANDLED;
}
@@ -3194,6 +3209,95 @@
pm_chg_vddmax_set(chip, adj_vdd_max_mv);
}
+static void set_appropriate_vbatdet(struct pm8921_chg_chip *chip)
+{
+ if (chip->is_bat_cool)
+ pm_chg_vbatdet_set(the_chip,
+ the_chip->cool_bat_voltage
+ - the_chip->resume_voltage_delta);
+ else if (chip->is_bat_warm)
+ pm_chg_vbatdet_set(the_chip,
+ the_chip->warm_bat_voltage
+ - the_chip->resume_voltage_delta);
+ else
+ pm_chg_vbatdet_set(the_chip,
+ the_chip->max_voltage_mv
+ - the_chip->resume_voltage_delta);
+}
+
+static void set_appropriate_battery_current(struct pm8921_chg_chip *chip)
+{
+ unsigned int chg_current = chip->max_bat_chg_current;
+
+ if (chip->is_bat_cool)
+ chg_current = min(chg_current, chip->cool_bat_chg_current);
+
+ if (chip->is_bat_warm)
+ chg_current = min(chg_current, chip->warm_bat_chg_current);
+
+ if (thermal_mitigation != 0 && chip->thermal_mitigation)
+ chg_current = min(chg_current,
+ chip->thermal_mitigation[thermal_mitigation]);
+
+ pm_chg_ibatmax_set(the_chip, chg_current);
+}
+
+#define TEMP_HYSTERISIS_DECIDEGC 20
+static void battery_cool(bool enter)
+{
+ pr_debug("enter = %d\n", enter);
+ if (enter == the_chip->is_bat_cool)
+ return;
+ the_chip->is_bat_cool = enter;
+ if (enter)
+ pm_chg_vddmax_set(the_chip, the_chip->cool_bat_voltage);
+ else
+ pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
+ set_appropriate_battery_current(the_chip);
+ set_appropriate_vbatdet(the_chip);
+}
+
+static void battery_warm(bool enter)
+{
+ pr_debug("enter = %d\n", enter);
+ if (enter == the_chip->is_bat_warm)
+ return;
+ the_chip->is_bat_warm = enter;
+ if (enter)
+ pm_chg_vddmax_set(the_chip, the_chip->warm_bat_voltage);
+ else
+ pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
+
+ set_appropriate_battery_current(the_chip);
+ set_appropriate_vbatdet(the_chip);
+}
+
+static void check_temp_thresholds(struct pm8921_chg_chip *chip)
+{
+ int temp = 0;
+
+ temp = get_prop_batt_temp(chip);
+ pr_debug("temp = %d, warm_thr_temp = %d, cool_thr_temp = %d\n",
+ temp, chip->warm_temp_dc,
+ chip->cool_temp_dc);
+
+ if (chip->warm_temp_dc != INT_MIN) {
+ if (chip->is_bat_warm
+ && temp < chip->warm_temp_dc - TEMP_HYSTERISIS_DECIDEGC)
+ battery_warm(false);
+ else if (!chip->is_bat_warm && temp >= chip->warm_temp_dc)
+ battery_warm(true);
+ }
+
+ if (chip->cool_temp_dc != INT_MIN) {
+ if (chip->is_bat_cool
+ && temp > chip->cool_temp_dc + TEMP_HYSTERISIS_DECIDEGC)
+ battery_cool(false);
+ else if (!chip->is_bat_cool && temp <= chip->cool_temp_dc)
+ battery_cool(true);
+ }
+}
+
enum {
CHG_IN_PROGRESS,
CHG_NOT_IN_PROGRESS,
@@ -3332,8 +3436,7 @@
if (end == CHG_NOT_IN_PROGRESS) {
count = 0;
- wake_unlock(&chip->eoc_wake_lock);
- return;
+ goto eoc_worker_stop;
}
/* If the disable hw clock switching
@@ -3357,21 +3460,6 @@
if (count == CONSECUTIVE_COUNT) {
count = 0;
pr_info("End of Charging\n");
- /* set the vbatdet back, in case it was changed
- * to trigger charging */
- if (chip->is_bat_cool) {
- pm_chg_vbatdet_set(the_chip,
- the_chip->cool_bat_voltage
- - the_chip->resume_voltage_delta);
- } else if (chip->is_bat_warm) {
- pm_chg_vbatdet_set(the_chip,
- the_chip->warm_bat_voltage
- - the_chip->resume_voltage_delta);
- } else {
- pm_chg_vbatdet_set(the_chip,
- the_chip->max_voltage_mv
- - the_chip->resume_voltage_delta);
- }
pm_chg_auto_enable(chip, 0);
@@ -3384,120 +3472,20 @@
chip->bms_notify.is_battery_full = 1;
/* declare end of charging by invoking chgdone interrupt */
chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
- wake_unlock(&chip->eoc_wake_lock);
} else {
+ check_temp_thresholds(chip);
adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
pr_debug("EOC count = %d\n", count);
schedule_delayed_work(&chip->eoc_work,
round_jiffies_relative(msecs_to_jiffies
(EOC_CHECK_PERIOD_MS)));
- }
-}
-
-static void btm_configure_work(struct work_struct *work)
-{
- int rc;
-
- rc = pm8xxx_adc_btm_configure(&btm_config);
- if (rc)
- pr_err("failed to configure btm rc=%d", rc);
-}
-
-DECLARE_WORK(btm_config_work, btm_configure_work);
-
-static void set_appropriate_battery_current(struct pm8921_chg_chip *chip)
-{
- unsigned int chg_current = chip->max_bat_chg_current;
-
- if (chip->is_bat_cool)
- chg_current = min(chg_current, chip->cool_bat_chg_current);
-
- if (chip->is_bat_warm)
- chg_current = min(chg_current, chip->warm_bat_chg_current);
-
- if (thermal_mitigation != 0 && chip->thermal_mitigation)
- chg_current = min(chg_current,
- chip->thermal_mitigation[thermal_mitigation]);
-
- pm_chg_ibatmax_set(the_chip, chg_current);
-}
-
-#define TEMP_HYSTERISIS_DEGC 2
-static void battery_cool(bool enter)
-{
- pr_debug("enter = %d\n", enter);
- if (enter == the_chip->is_bat_cool)
return;
- the_chip->is_bat_cool = enter;
- if (enter) {
- btm_config.low_thr_temp =
- the_chip->cool_temp_dc + TEMP_HYSTERISIS_DEGC;
- set_appropriate_battery_current(the_chip);
- pm_chg_vddmax_set(the_chip, the_chip->cool_bat_voltage);
- pm_chg_vbatdet_set(the_chip,
- the_chip->cool_bat_voltage
- - the_chip->resume_voltage_delta);
- } else {
- btm_config.low_thr_temp = the_chip->cool_temp_dc;
- set_appropriate_battery_current(the_chip);
- pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
- pm_chg_vbatdet_set(the_chip,
- the_chip->max_voltage_mv
- - the_chip->resume_voltage_delta);
}
- schedule_work(&btm_config_work);
-}
-static void battery_warm(bool enter)
-{
- pr_debug("enter = %d\n", enter);
- if (enter == the_chip->is_bat_warm)
- return;
- the_chip->is_bat_warm = enter;
- if (enter) {
- btm_config.high_thr_temp =
- the_chip->warm_temp_dc - TEMP_HYSTERISIS_DEGC;
- set_appropriate_battery_current(the_chip);
- pm_chg_vddmax_set(the_chip, the_chip->warm_bat_voltage);
- pm_chg_vbatdet_set(the_chip,
- the_chip->warm_bat_voltage
- - the_chip->resume_voltage_delta);
- } else {
- btm_config.high_thr_temp = the_chip->warm_temp_dc;
- set_appropriate_battery_current(the_chip);
- pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
- pm_chg_vbatdet_set(the_chip,
- the_chip->max_voltage_mv
- - the_chip->resume_voltage_delta);
- }
- schedule_work(&btm_config_work);
-}
-
-static int configure_btm(struct pm8921_chg_chip *chip)
-{
- int rc;
-
- if (chip->warm_temp_dc != INT_MIN)
- btm_config.btm_warm_fn = battery_warm;
- else
- btm_config.btm_warm_fn = NULL;
-
- if (chip->cool_temp_dc != INT_MIN)
- btm_config.btm_cool_fn = battery_cool;
- else
- btm_config.btm_cool_fn = NULL;
-
- btm_config.low_thr_temp = chip->cool_temp_dc;
- btm_config.high_thr_temp = chip->warm_temp_dc;
- btm_config.interval = chip->temp_check_period;
- rc = pm8xxx_adc_btm_configure(&btm_config);
- if (rc)
- pr_err("failed to configure btm rc = %d\n", rc);
- rc = pm8xxx_adc_btm_start();
- if (rc)
- pr_err("failed to start btm rc = %d\n", rc);
-
- return rc;
+eoc_worker_stop:
+ wake_unlock(&chip->eoc_wake_lock);
+ /* set the vbatdet back, in case it was changed to trigger charging */
+ set_appropriate_vbatdet(chip);
}
/**
@@ -4207,6 +4195,81 @@
}
DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
+static int reg_loop;
+#define MAX_REG_LOOP_CHAR 10
+static int get_reg_loop_param(char *buf, struct kernel_param *kp)
+{
+ u8 temp;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ temp = pm_chg_get_regulation_loop(the_chip);
+ return snprintf(buf, MAX_REG_LOOP_CHAR, "%d", temp);
+}
+module_param_call(reg_loop, NULL, get_reg_loop_param,
+ ®_loop, 0644);
+
+static int max_chg_ma;
+#define MAX_MA_CHAR 10
+static int get_max_chg_ma_param(char *buf, struct kernel_param *kp)
+{
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ return snprintf(buf, MAX_MA_CHAR, "%d", the_chip->max_bat_chg_current);
+}
+module_param_call(max_chg_ma, NULL, get_max_chg_ma_param,
+ &max_chg_ma, 0644);
+static int ibatmax_ma;
+static int set_ibat_max(const char *val, struct kernel_param *kp)
+{
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("error setting value %d\n", rc);
+ return rc;
+ }
+
+ if (abs(ibatmax_ma - the_chip->max_bat_chg_current)
+ <= the_chip->ibatmax_max_adj_ma) {
+ rc = pm_chg_ibatmax_set(the_chip, ibatmax_ma);
+ if (rc) {
+ pr_err("Failed to set ibatmax rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+static int get_ibat_max(char *buf, struct kernel_param *kp)
+{
+ int ibat_ma;
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = pm_chg_ibatmax_get(the_chip, &ibat_ma);
+ if (rc) {
+ pr_err("ibatmax_get error = %d\n", rc);
+ return rc;
+ }
+
+ return snprintf(buf, MAX_MA_CHAR, "%d", ibat_ma);
+}
+module_param_call(ibatmax_ma, set_ibat_max, get_ibat_max,
+ &ibatmax_ma, 0644);
enum {
BAT_WARM_ZONE,
BAT_COOL_ZONE,
@@ -4334,19 +4397,8 @@
static int pm8921_charger_resume(struct device *dev)
{
- int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (!(chip->cool_temp_dc == INT_MIN && chip->warm_temp_dc == INT_MIN)
- && !(chip->keep_btm_on_suspend)) {
- rc = pm8xxx_adc_btm_configure(&btm_config);
- if (rc)
- pr_err("couldn't reconfigure btm rc=%d\n", rc);
-
- rc = pm8xxx_adc_btm_start();
- if (rc)
- pr_err("couldn't restart btm rc=%d\n", rc);
- }
if (pm8921_chg_is_enabled(chip, LOOP_CHANGE_IRQ)) {
disable_irq_wake(chip->pmic_chg_irq[LOOP_CHANGE_IRQ]);
pm8921_chg_disable_irq(chip, LOOP_CHANGE_IRQ);
@@ -4359,18 +4411,14 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (!(chip->cool_temp_dc == INT_MIN && chip->warm_temp_dc == INT_MIN)
- && !(chip->keep_btm_on_suspend)) {
- rc = pm8xxx_adc_btm_end();
- if (rc)
- pr_err("Failed to disable BTM on suspend rc=%d\n", rc);
- }
-
if (is_usb_chg_plugged_in(chip)) {
pm8921_chg_enable_irq(chip, LOOP_CHANGE_IRQ);
enable_irq_wake(chip->pmic_chg_irq[LOOP_CHANGE_IRQ]);
}
+ rc = pm8xxx_batt_alarm_enable(PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+ if (rc < 0)
+ pr_err("Failed to enable lower comparator\n");
return 0;
}
static int __devinit pm8921_charger_probe(struct platform_device *pdev)
@@ -4420,13 +4468,11 @@
chip->warm_temp_dc = INT_MIN;
chip->temp_check_period = pdata->temp_check_period;
- chip->dc_unplug_check = pdata->dc_unplug_check;
chip->max_bat_chg_current = pdata->max_bat_chg_current;
chip->cool_bat_chg_current = pdata->cool_bat_chg_current;
chip->warm_bat_chg_current = pdata->warm_bat_chg_current;
chip->cool_bat_voltage = pdata->cool_bat_voltage;
chip->warm_bat_voltage = pdata->warm_bat_voltage;
- chip->keep_btm_on_suspend = pdata->keep_btm_on_suspend;
chip->trkl_voltage = pdata->trkl_voltage;
chip->weak_voltage = pdata->weak_voltage;
chip->trkl_current = pdata->trkl_current;
@@ -4445,6 +4491,9 @@
if (chip->battery_less_hardware)
charging_disabled = 1;
+ chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
+ chip->max_bat_chg_current);
+
rc = pm8921_chg_hw_init(chip);
if (rc) {
pr_err("couldn't init hardware rc=%d\n", rc);
@@ -4517,17 +4566,6 @@
enable_irq_wake(chip->pmic_chg_irq[BAT_TEMP_OK_IRQ]);
enable_irq_wake(chip->pmic_chg_irq[VBATDET_LOW_IRQ]);
enable_irq_wake(chip->pmic_chg_irq[FASTCHG_IRQ]);
- /*
- * if both the cool_temp_dc and warm_temp_dc are invalid device doesnt
- * care for jeita compliance
- */
- if (!(chip->cool_temp_dc == INT_MIN && chip->warm_temp_dc == INT_MIN)) {
- rc = configure_btm(chip);
- if (rc) {
- pr_err("couldn't register with btm rc=%d\n", rc);
- goto free_irq;
- }
- }
rc = pm8921_charger_configure_batt_alarm(chip);
if (rc) {
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 1955ff4..3b813d8 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -1950,6 +1950,18 @@
goto error_read;
}
+ rc = qpnp_vadc_is_ready();
+ if (rc) {
+ pr_info("vadc not ready: %d, deferring probe\n", rc);
+ goto error_read;
+ }
+
+ rc = qpnp_iadc_is_ready();
+ if (rc) {
+ pr_info("iadc not ready: %d, deferring probe\n", rc);
+ goto error_read;
+ }
+
rc = set_battery_data(chip);
if (rc) {
pr_err("Bad battery data %d\n", rc);
@@ -1996,7 +2008,7 @@
vbatt = 0;
get_battery_voltage(&vbatt);
- pr_info("OK battery_capacity_at_boot=%d vbatt = %d\n",
+ pr_debug("OK battery_capacity_at_boot=%d vbatt = %d\n",
get_prop_bms_capacity(chip),
vbatt);
pr_info("probe success\n");
diff --git a/drivers/power/smb137c-charger.c b/drivers/power/smb137c-charger.c
index b865bd7..9cdf5b5 100644
--- a/drivers/power/smb137c-charger.c
+++ b/drivers/power/smb137c-charger.c
@@ -992,29 +992,47 @@
{
struct smb137c_chip *chip = container_of(psy, struct smb137c_chip, psy);
union power_supply_propval prop = {0,};
+ int scope = POWER_SUPPLY_SCOPE_DEVICE;
+ int current_limit = USB_MIN_CURRENT_UA;
+ int online = 0;
+ int rc;
mutex_lock(&chip->lock);
dev_dbg(&chip->client->dev, "%s: start\n", __func__);
- chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_ONLINE,
- &prop);
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB online property, rc=%d\n",
+ __func__, rc);
+ else
+ online = prop.intval;
- if (prop.intval) {
- /* USB online */
- chip->usb_psy->get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_SCOPE, &prop);
- if (prop.intval == POWER_SUPPLY_SCOPE_SYSTEM) {
- /* USB host mode */
- smb137c_enable_otg_mode(chip);
- smb137c_disable_charging(chip);
- } else {
- /* USB device mode */
- chip->usb_psy->get_property(chip->usb_psy,
+ rc = chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_SCOPE,
+ &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB scope property, rc=%d\n",
+ __func__, rc);
+ else
+ scope = prop.intval;
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
- smb137c_set_usb_input_current_limit(chip, prop.intval);
- smb137c_enable_charging(chip);
- smb137c_disable_otg_mode(chip);
- }
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB current_max property, rc=%d\n",
+ __func__, rc);
+ else
+ current_limit = prop.intval;
+
+ if (scope == POWER_SUPPLY_SCOPE_SYSTEM) {
+ /* USB host mode */
+ smb137c_disable_charging(chip);
+ smb137c_enable_otg_mode(chip);
+ } else if (online) {
+ /* USB online in device mode */
+ smb137c_set_usb_input_current_limit(chip, current_limit);
+ smb137c_enable_charging(chip);
+ smb137c_disable_otg_mode(chip);
} else {
/* USB offline */
smb137c_disable_charging(chip);
@@ -1318,7 +1336,6 @@
};
MODULE_DEVICE_TABLE(i2c, smb137c_id);
-/* TODO: should this be "summit,smb137c-charger"? */
static const struct of_device_id smb137c_match[] = {
{ .compatible = "summit,smb137c", },
{ },
diff --git a/drivers/power/smb350_charger.c b/drivers/power/smb350_charger.c
index dc0c4bd..21d7aea 100644
--- a/drivers/power/smb350_charger.c
+++ b/drivers/power/smb350_charger.c
@@ -229,20 +229,18 @@
return power_ok;
}
-static bool smb350_is_charging(struct i2c_client *client)
+static bool smb350_is_charger_present(struct i2c_client *client)
{
int val;
- bool is_charging;
+ /* Normally the device is non-removable and embedded on the board.
+ * Verify that charger is present by getting I2C response.
+ */
val = smb350_read_reg(client, STATUS_B_REG);
if (val < 0)
return false;
- val = (val >> 1) & 0x3;
-
- is_charging = (val != 0);
-
- return is_charging;
+ return true;
}
static int smb350_get_prop_charge_type(struct smb350_device *dev)
@@ -408,10 +406,10 @@
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = smb350_is_dc_present(client);
+ val->intval = smb350_is_charger_present(client);
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = smb350_is_charging(client);
+ val->intval = smb350_is_dc_present(client);
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
val->intval = smb350_get_prop_charge_type(dev);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 57cde45..0ebb944 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1897,6 +1897,8 @@
if (rdev->desc->ops->list_voltage)
selector = rdev->desc->ops->list_voltage(rdev,
selector);
+ else if (rdev->desc->ops->get_voltage)
+ selector = rdev->desc->ops->get_voltage(rdev);
else
selector = -1;
} else if (rdev->desc->ops->set_voltage_sel) {
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 0549593..a330f1b 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -550,11 +550,12 @@
}
static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
- int min_uV, int max_uV, int *range_sel, int *voltage_sel)
+ int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+ unsigned *selector)
{
struct qpnp_voltage_range *range;
int uV = min_uV;
- int lim_min_uV, lim_max_uV, i;
+ int lim_min_uV, lim_max_uV, i, range_id;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -575,7 +576,8 @@
for (i = vreg->set_points->count - 1; i > 0; i--)
if (uV > vreg->set_points->range[i - 1].max_uV)
break;
- range = &vreg->set_points->range[i];
+ range_id = i;
+ range = &vreg->set_points->range[range_id];
*range_sel = range->range_sel;
/*
@@ -594,6 +596,11 @@
return -EINVAL;
}
+ *selector = 0;
+ for (i = 0; i < range_id; i++)
+ *selector += vreg->set_points->range[i].n_voltages;
+ *selector += (uV - range->set_point_min_uV) / range->step_uV;
+
return 0;
}
@@ -605,7 +612,7 @@
u8 buf[2];
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
@@ -669,7 +676,7 @@
int rc, range_sel, voltage_sel;
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index c26da60..0497a32 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,17 +30,18 @@
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
-#include <mach/msm_spi.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <mach/dma.h>
-#include <asm/atomic.h>
-#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <mach/msm_spi.h>
+#include <mach/sps.h>
+#include <mach/dma.h>
#include "spi_qsd.h"
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
@@ -211,16 +212,19 @@
&dd->output_block_size, block, mult)) {
goto fifo_size_err;
}
- /* DM mode is not available for this block size */
- if (dd->input_block_size == 4 || dd->output_block_size == 4)
- dd->use_dma = 0;
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ /* DM mode is not available for this block size */
+ if (dd->input_block_size == 4 || dd->output_block_size == 4)
+ dd->use_dma = 0;
- /* DM mode is currently unsupported for different block sizes */
- if (dd->input_block_size != dd->output_block_size)
- dd->use_dma = 0;
+ /* DM mode is currently unsupported for different block sizes */
+ if (dd->input_block_size != dd->output_block_size)
+ dd->use_dma = 0;
- if (dd->use_dma)
- dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
+ if (dd->use_dma)
+ dd->burst_size = max(dd->input_block_size,
+ DM_BURST_SIZE);
+ }
return;
@@ -352,14 +356,19 @@
return 0;
}
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
if (n != (*config & SPI_CFG_N))
*config = (*config & ~SPI_CFG_N) | n;
- if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
+ if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
+ || (dd->mode == SPI_BAM_MODE)) {
if (dd->read_buf == NULL)
*config |= SPI_NO_INPUT;
if (dd->write_buf == NULL)
@@ -367,23 +376,207 @@
}
}
-static void msm_spi_set_config(struct msm_spi *dd, int bpw)
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculatd value for SPI_CONFIG
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
- u32 spi_config;
-
- spi_config = readl_relaxed(dd->base + SPI_CONFIG);
-
- if (dd->cur_msg->spi->mode & SPI_CPHA)
- spi_config &= ~SPI_CFG_INPUT_FIRST;
- else
- spi_config |= SPI_CFG_INPUT_FIRST;
- if (dd->cur_msg->spi->mode & SPI_LOOP)
+ if (mode & SPI_LOOP)
spi_config |= SPI_CFG_LOOPBACK;
else
spi_config &= ~SPI_CFG_LOOPBACK;
- msm_spi_add_configs(dd, &spi_config, bpw-1);
+
+ if (mode & SPI_CPHA)
+ spi_config &= ~SPI_CFG_INPUT_FIRST;
+ else
+ spi_config |= SPI_CFG_INPUT_FIRST;
+
+ return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+ u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+ spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+ spi_config, dd->cur_msg->spi->mode);
+
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+ /* flags removed from SPI_CONFIG in QUP version-2 */
+ msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+ else if (dd->mode == SPI_BAM_MODE)
+ spi_config |= SPI_CFG_INPUT_FIRST;
+
writel_relaxed(spi_config, dd->base + SPI_CONFIG);
- msm_spi_set_qup_config(dd, bpw);
+}
+
+/**
+ * msm_spi_set_mx_counts: set SPI_MX_INPUT_COUNT and SPI_MX_INPUT_COUNT
+ * for FIFO-mode. set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
+ * BAM and DMOV modes.
+ * @n_words The number of reads/writes of size N.
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+ /*
+ * n_words cannot exceed fifo_size, and only one READ COUNT
+ * interrupt is generated per transaction, so for transactions
+ * larger than fifo size READ COUNT must be disabled.
+ * For those transactions we usually move to Data Mover mode.
+ */
+ if (dd->mode == SPI_FIFO_MODE) {
+ if (n_words <= dd->input_fifo_size) {
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, n_words);
+ } else {
+ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, 0);
+ }
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+ /* must be zero for FIFO */
+ writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+ writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+ }
+ } else {
+ /* must be zero for BAM and DMOV */
+ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, 0);
+
+ /*
+ * for DMA transfers, both QUP_MX_INPUT_COUNT and
+ * QUP_MX_OUTPUT_COUNT must be zero to all cases but one.
+ * That case is a non-balanced transfer when there is
+ * only a read_buf.
+ */
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+ if (dd->write_buf)
+ writel_relaxed(0,
+ dd->base + SPI_MX_INPUT_COUNT);
+ else
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_INPUT_COUNT);
+
+ writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+ }
+ }
+}
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single
+ * transfer. Between transfer QUP must change to reset state. A loop is
+ * issuing a single BAM transfer at a time. If another tsranfer is
+ * required, it waits for the trasfer to finish, then moving to reset
+ * state, and back to run state to issue the next transfer.
+ * The function dose not wait for the last transfer to end, or if only
+ * a single transfer is required, the function dose not wait for it to
+ * end.
+ * @timeout max time in jiffies to wait for a transfer to finish.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
+{
+ u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
+ int ret;
+ /*
+ * QUP must move to reset mode every 64K-1 bytes of transfer
+ * (counter is 16 bit)
+ */
+ if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
+ /* assert chip select unconditionally */
+ u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+ if (!(spi_ioc & SPI_IO_C_FORCE_CS))
+ writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
+ dd->base + SPI_IO_CONTROL);
+ }
+
+ /* Following flags are required since we are waiting on all transfers */
+ cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+ /*
+ * on a balanced transaction, BAM will set the flags on the producer
+ * pipe based on the flags set on the consumer pipe
+ */
+ prod_flags = (dd->write_buf) ? 0 : cons_flags;
+
+ while (dd->tx_bytes_remaining > 0) {
+ bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
+ bytes_to_send = min_t(u32, dd->tx_bytes_remaining
+ , SPI_MAX_TRFR_BTWN_RESETS);
+ n_words_xfr = DIV_ROUND_UP(bytes_to_send
+ , dd->bytes_per_word);
+
+ msm_spi_set_mx_counts(dd, n_words_xfr);
+
+ ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to set QUP state to run",
+ __func__);
+ goto xfr_err;
+ }
+
+ /* enqueue read buffer in BAM */
+ if (dd->read_buf) {
+ ret = sps_transfer_one(dd->bam.prod.handle,
+ dd->cur_transfer->rx_dma + bytes_sent,
+ bytes_to_send, dd, prod_flags);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to queue producer BAM transfer",
+ __func__);
+ goto xfr_err;
+ }
+ }
+
+ /* enqueue write buffer in BAM */
+ if (dd->write_buf) {
+ ret = sps_transfer_one(dd->bam.cons.handle,
+ dd->cur_transfer->tx_dma + bytes_sent,
+ bytes_to_send, dd, cons_flags);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to queue consumer BAM transfer",
+ __func__);
+ goto xfr_err;
+ }
+ }
+
+ dd->tx_bytes_remaining -= bytes_to_send;
+
+ /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
+ if (dd->tx_bytes_remaining > 0) {
+ if (!wait_for_completion_timeout(
+ &dd->transfer_complete, timeout)) {
+ dev_err(dd->dev,
+ "%s: SPI transaction timeout",
+ __func__);
+ dd->cur_msg->status = -EIO;
+ ret = -EIO;
+ goto xfr_err;
+ }
+ ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to set QUP state to reset",
+ __func__);
+ goto xfr_err;
+ }
+ init_completion(&dd->transfer_complete);
+ }
+ }
+ return 0;
+
+xfr_err:
+ return ret;
}
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
@@ -767,7 +960,15 @@
return IRQ_HANDLED;
}
-static int msm_spi_map_dma_buffers(struct msm_spi *dd)
+/**
+ * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
+ * @return zero on success or negative error code
+ *
+ * calls dma_map_single() on the read/write buffers, effectively invalidating
+ * their cash entries. for For WR-WR and WR-RD transfers, allocates temporary
+ * buffer and copy the data to/from the client buffers
+ */
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
struct device *dev;
struct spi_transfer *first_xfr;
@@ -847,7 +1048,7 @@
return ret;
}
-static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
+static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
struct device *dev;
u32 offset;
@@ -914,56 +1115,190 @@
}
}
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+ struct device *dev;
+
+ /* mapped by client */
+ if (dd->cur_msg->is_dma_mapped)
+ return;
+
+ dev = &dd->cur_msg->spi->dev;
+ if (dd->cur_transfer->rx_buf)
+ dma_unmap_single(dev, dd->cur_transfer->rx_dma,
+ dd->cur_transfer->len,
+ DMA_FROM_DEVICE);
+
+ if (dd->cur_transfer->tx_buf)
+ dma_unmap_single(dev, dd->cur_transfer->tx_dma,
+ dd->cur_transfer->len,
+ DMA_TO_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+ if (dd->mode == SPI_DMOV_MODE)
+ msm_spi_dmov_unmap_buffers(dd);
+ else if (dd->mode == SPI_BAM_MODE)
+ msm_spi_bam_unmap_buffers(dd);
+}
+
/**
- * msm_use_dm - decides whether to use data mover for this
- * transfer
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
* @dd: device
* @tr: transfer
*
- * Start using DM if:
- * 1. Transfer is longer than 3*block size.
- * 2. Buffers should be aligned to cache line.
- * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
+ * Start using DMA if:
+ * 1. Is supported by HW
+ * 2. Is not disabled by platform data
+ * 3. Transfer size is greater than 3*block size.
+ * 4. Buffers are aligned to cache line.
+ * 5. Bytes-per-word is 8,16 or 32.
*/
-static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
- u8 bpw)
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
- u32 cache_line = dma_get_cache_alignment();
-
if (!dd->use_dma)
- return 0;
+ return false;
+
+ /* check constraints from platform data */
+ if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+ return false;
if (dd->cur_msg_len < 3*dd->input_block_size)
- return 0;
+ return false;
if (dd->multi_xfr && !dd->read_len && !dd->write_len)
- return 0;
+ return false;
- if (tr->tx_buf) {
- if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
- return 0;
- }
- if (tr->rx_buf) {
- if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
- return 0;
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ u32 cache_line = dma_get_cache_alignment();
+
+ if (tr->tx_buf) {
+ if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+ return 0;
+ }
+ if (tr->rx_buf) {
+ if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+ return false;
+ }
+
+ if (tr->cs_change &&
+ ((bpw != 8) || (bpw != 16) || (bpw != 32)))
+ return false;
}
- if (tr->cs_change &&
- ((bpw != 8) || (bpw != 16) || (bpw != 32)))
- return 0;
- return 1;
+ return true;
+}
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+ if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+ if (dd->qup_ver) {
+ dd->mode = SPI_BAM_MODE;
+ } else {
+ dd->mode = SPI_DMOV_MODE;
+ if (dd->write_len && dd->read_len) {
+ dd->tx_bytes_remaining = dd->write_len;
+ dd->rx_bytes_remaining = dd->read_len;
+ }
+ }
+ } else {
+ dd->mode = SPI_FIFO_MODE;
+ if (dd->multi_xfr) {
+ dd->read_len = dd->cur_transfer->len;
+ dd->write_len = dd->cur_transfer->len;
+ }
+ }
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+ u32 spi_iom;
+ spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+ /* Set input and output transfer mode: FIFO, DMOV, or BAM */
+ spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+ spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
+ spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
+ /* Turn on packing for data mover */
+ if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
+ spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+ else
+ spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+
+ /*if (dd->mode == SPI_BAM_MODE) {
+ spi_iom |= SPI_IO_C_NO_TRI_STATE;
+ spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
+ }*/
+ writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+ if (mode & SPI_CPOL)
+ spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+ return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+ u32 spi_ioc, spi_ioc_orig, chip_select;
+
+ spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+ spi_ioc_orig = spi_ioc;
+ spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
+ , dd->cur_msg->spi->mode);
+ /* Set chip-select */
+ chip_select = dd->cur_msg->spi->chip_select << 2;
+ if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+ spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+ if (!dd->cur_transfer->cs_change)
+ spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+ if (spi_ioc != spi_ioc_orig)
+ writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+ return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+ /* mask INPUT and OUTPUT service flags to prevent IRQs on FIFO status
+ * change in BAM mode */
+ u32 mask = (dd->mode == SPI_BAM_MODE) ?
+ QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
+ : 0;
+ writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}
static void msm_spi_process_transfer(struct msm_spi *dd)
{
u8 bpw;
- u32 spi_ioc;
- u32 spi_iom;
- u32 spi_ioc_orig;
u32 max_speed;
- u32 chip_select;
u32 read_count;
u32 timeout;
+ u32 spi_ioc;
u32 int_loopback = 0;
dd->tx_bytes_remaining = dd->cur_msg_len;
@@ -987,6 +1322,10 @@
if (!dd->clock_speed || max_speed != dd->clock_speed)
msm_spi_clock_set(dd, max_speed);
+ timeout = 100 * msecs_to_jiffies(
+ DIV_ROUND_UP(dd->cur_msg_len * 8,
+ DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
if (dd->cur_msg->spi->mode & SPI_LOOP)
int_loopback = 1;
@@ -1004,60 +1343,24 @@
__func__);
return;
}
- if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
- dd->mode = SPI_FIFO_MODE;
- if (dd->multi_xfr) {
- dd->read_len = dd->cur_transfer->len;
- dd->write_len = dd->cur_transfer->len;
- }
- /* read_count cannot exceed fifo_size, and only one READ COUNT
- interrupt is generated per transaction, so for transactions
- larger than fifo size READ COUNT must be disabled.
- For those transactions we usually move to Data Mover mode.
- */
- if (read_count <= dd->input_fifo_size) {
- writel_relaxed(read_count,
- dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, read_count);
- } else {
- writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, 0);
- }
- } else {
- dd->mode = SPI_DMOV_MODE;
- if (dd->write_len && dd->read_len) {
- dd->tx_bytes_remaining = dd->write_len;
- dd->rx_bytes_remaining = dd->read_len;
- }
- }
- /* Write mode - fifo or data mover*/
- spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
- spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
- spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
- spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
- /* Turn on packing for data mover */
- if (dd->mode == SPI_DMOV_MODE)
- spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
- else
- spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
- writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+ if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+ dev_err(dd->dev,
+ "%s: Error setting QUP to reset-state",
+ __func__);
- msm_spi_set_config(dd, bpw);
-
- spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
- spi_ioc_orig = spi_ioc;
- if (dd->cur_msg->spi->mode & SPI_CPOL)
- spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
- else
- spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
- chip_select = dd->cur_msg->spi->chip_select << 2;
- if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
- spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
- if (!dd->cur_transfer->cs_change)
- spi_ioc |= SPI_IO_C_MX_CS_MODE;
- if (spi_ioc != spi_ioc_orig)
- writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+ msm_spi_set_transfer_mode(dd, bpw, read_count);
+ msm_spi_set_mx_counts(dd, read_count);
+ if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
+ if (msm_spi_dma_map_buffers(dd) < 0) {
+ pr_err("Mapping DMA buffers\n");
+ return;
+ }
+ msm_spi_set_qup_io_modes(dd);
+ msm_spi_set_spi_config(dd, bpw);
+ msm_spi_set_qup_config(dd, bpw);
+ spi_ioc = msm_spi_set_spi_io_control(dd);
+ msm_spi_set_qup_op_mask(dd);
if (dd->mode == SPI_DMOV_MODE) {
msm_spi_setup_dm_transfer(dd);
@@ -1071,27 +1374,35 @@
if (msm_spi_prepare_for_write(dd))
goto transfer_end;
msm_spi_start_write(dd, read_count);
+ } else if (dd->mode == SPI_BAM_MODE) {
+ if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
+ dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+ __func__);
}
- /* Only enter the RUN state after the first word is written into
- the output FIFO. Otherwise, the output FIFO EMPTY interrupt
- might fire before the first word is written resulting in a
- possible race condition.
+ /*
+ * On BAM mode, current state here is run.
+ * Only enter the RUN state after the first word is written into
+ * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+ * might fire before the first word is written resulting in a
+ * possible race condition.
*/
- if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
- goto transfer_end;
-
- timeout = 100 * msecs_to_jiffies(
- DIV_ROUND_UP(dd->cur_msg_len * 8,
- DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+ if (dd->mode != SPI_BAM_MODE)
+ if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+ dev_warn(dd->dev,
+ "%s: Failed to set QUP to run-state. Mode:%d",
+ __func__, dd->mode);
+ goto transfer_end;
+ }
/* Assume success, this might change later upon transaction result */
dd->cur_msg->status = 0;
do {
if (!wait_for_completion_timeout(&dd->transfer_complete,
timeout)) {
- dev_err(dd->dev, "%s: SPI transaction "
- "timeout\n", __func__);
+ dev_err(dd->dev,
+ "%s: SPI transaction timeout\n",
+ __func__);
dd->cur_msg->status = -EIO;
if (dd->mode == SPI_DMOV_MODE) {
msm_dmov_flush(dd->tx_dma_chan, 1);
@@ -1102,8 +1413,7 @@
} while (msm_spi_dm_send_next(dd));
transfer_end:
- if (dd->mode == SPI_DMOV_MODE)
- msm_spi_unmap_dma_buffers(dd);
+ msm_spi_dma_unmap_buffers(dd);
dd->mode = SPI_MODE_NONE;
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
@@ -1266,10 +1576,10 @@
* WR-WR or WR-RD transfers
*/
if ((!dd->cur_msg->is_dma_mapped) &&
- (msm_use_dm(dd, dd->cur_transfer,
+ (msm_spi_use_dma(dd, dd->cur_transfer,
dd->cur_transfer->bits_per_word))) {
/* Mapping of DMA buffers */
- int ret = msm_spi_map_dma_buffers(dd);
+ int ret = msm_spi_dma_map_buffers(dd);
if (ret < 0) {
dd->cur_msg->status = ret;
goto error;
@@ -1474,22 +1784,13 @@
spi_ioc |= mask;
else
spi_ioc &= ~mask;
- if (spi->mode & SPI_CPOL)
- spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
- else
- spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+ spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
spi_config = readl_relaxed(dd->base + SPI_CONFIG);
- if (spi->mode & SPI_LOOP)
- spi_config |= SPI_CFG_LOOPBACK;
- else
- spi_config &= ~SPI_CFG_LOOPBACK;
- if (spi->mode & SPI_CPHA)
- spi_config &= ~SPI_CFG_INPUT_FIRST;
- else
- spi_config |= SPI_CFG_INPUT_FIRST;
+ spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+ spi_config, spi->mode);
writel_relaxed(spi_config, dd->base + SPI_CONFIG);
/* Ensure previous write completed before disabling the clocks */
@@ -1730,7 +2031,7 @@
roundup(dd->burst_size, cache_line))*2;
}
-static void msm_spi_teardown_dma(struct msm_spi *dd)
+static void msm_spi_dmov_teardown(struct msm_spi *dd)
{
int limit = 0;
@@ -1749,7 +2050,171 @@
dd->tx_padding = dd->rx_padding = NULL;
}
-static __init int msm_spi_init_dma(struct msm_spi *dd)
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+ enum msm_spi_pipe_direction pipe_dir)
+{
+ struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+ (&dd->bam.prod) : (&dd->bam.cons);
+ if (!pipe->teardown_required)
+ return;
+
+ sps_disconnect(pipe->handle);
+ dma_free_coherent(dd->dev, pipe->config.desc.size,
+ pipe->config.desc.base, pipe->config.desc.phys_base);
+ sps_free_endpoint(pipe->handle);
+ pipe->handle = 0;
+ pipe->teardown_required = false;
+}
+
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+ enum msm_spi_pipe_direction pipe_dir)
+{
+ int rc = 0;
+ struct sps_pipe *pipe_handle;
+ struct sps_register_event event = {0};
+ struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+ (&dd->bam.prod) : (&dd->bam.cons);
+ struct sps_connect *pipe_conf = &pipe->config;
+
+ pipe->handle = 0;
+ pipe_handle = sps_alloc_endpoint();
+ if (!pipe_handle) {
+ dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+ , __func__);
+ return -ENOMEM;
+ }
+
+ memset(pipe_conf, 0, sizeof(*pipe_conf));
+ rc = sps_get_config(pipe_handle, pipe_conf);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+ , __func__);
+ goto config_err;
+ }
+
+ if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+ pipe_conf->source = dd->bam.handle;
+ pipe_conf->destination = SPS_DEV_HANDLE_MEM;
+ pipe_conf->mode = SPS_MODE_SRC;
+ pipe_conf->src_pipe_index =
+ dd->pdata->bam_producer_pipe_index;
+ pipe_conf->dest_pipe_index = 0;
+ } else {
+ pipe_conf->source = SPS_DEV_HANDLE_MEM;
+ pipe_conf->destination = dd->bam.handle;
+ pipe_conf->mode = SPS_MODE_DEST;
+ pipe_conf->src_pipe_index = 0;
+ pipe_conf->dest_pipe_index =
+ dd->pdata->bam_consumer_pipe_index;
+ }
+ pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+ pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+ pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
+ pipe_conf->desc.size,
+ &pipe_conf->desc.phys_base,
+ GFP_KERNEL);
+ if (!pipe_conf->desc.base) {
+ dev_err(dd->dev, "%s: Failed allocate BAM pipe memory"
+ , __func__);
+ rc = -ENOMEM;
+ goto config_err;
+ }
+
+ memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
+
+ rc = sps_connect(pipe_handle, pipe_conf);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
+ goto connect_err;
+ }
+
+ event.mode = SPS_TRIGGER_WAIT;
+ event.options = SPS_O_EOT;
+ event.xfer_done = &dd->transfer_complete;
+ event.user = (void *)dd;
+ rc = sps_register_event(pipe_handle, &event);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to register BAM EOT event",
+ __func__);
+ goto register_err;
+ }
+
+ pipe->handle = pipe_handle;
+ pipe->teardown_required = true;
+ return 0;
+
+register_err:
+ sps_disconnect(pipe_handle);
+connect_err:
+ dma_free_coherent(dd->dev, pipe_conf->desc.size,
+ pipe_conf->desc.base, pipe_conf->desc.phys_base);
+config_err:
+ sps_free_endpoint(pipe_handle);
+
+ return rc;
+}
+
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+ msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+ msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+ if (dd->bam.deregister_required) {
+ sps_deregister_bam_device(dd->bam.handle);
+ dd->bam.deregister_required = false;
+ }
+}
+
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+ struct sps_bam_props bam_props = {0};
+ u32 bam_handle;
+ int rc = 0;
+
+ rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+ if (rc || !bam_handle) {
+ bam_props.phys_addr = dd->bam.phys_addr;
+ bam_props.virt_addr = dd->bam.base;
+ bam_props.irq = dd->bam.irq;
+ bam_props.manage = SPS_BAM_MGR_LOCAL;
+ bam_props.summing_threshold = 0x10;
+
+ rc = sps_register_bam_device(&bam_props, &bam_handle);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to register BAM device",
+ __func__);
+ return rc;
+ }
+ dd->bam.deregister_required = true;
+ }
+
+ dd->bam.handle = bam_handle;
+
+ rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to init producer BAM-pipe",
+ __func__);
+ goto bam_init_error;
+ }
+
+ rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to init consumer BAM-pipe",
+ __func__);
+ goto bam_init_error;
+ }
+
+ return 0;
+
+bam_init_error:
+ msm_spi_bam_teardown(dd);
+ return rc;
+}
+
+static __init int msm_spi_dmov_init(struct msm_spi *dd)
{
dmov_box *box;
u32 cache_line = dma_get_cache_alignment();
@@ -1811,10 +2276,15 @@
return 0;
}
-struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
+/**
+ * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
+ */
+struct msm_spi_platform_data *
+__init msm_spi_dt_to_pdata(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct msm_spi_platform_data *pdata;
+ int rc;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
@@ -1827,9 +2297,76 @@
of_property_read_u32(node, "infinite_mode",
&pdata->infinite_mode);
+ pdata->ver_reg_exists = of_property_read_bool(node
+ , "qcom,ver-reg-exists");
+
+ pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
+
+ if (pdata->use_bam) {
+ rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
+ &pdata->bam_consumer_pipe_index);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+ pdata->use_bam = false;
+ }
+
+ rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
+ &pdata->bam_producer_pipe_index);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "missing qcom,bam-producer-pipe-index entry in device-tree\n");
+ pdata->use_bam = false;
+ }
+ }
return pdata;
}
+static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+ u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+ return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
+ : SPI_QUP_VERSION_NONE;
+}
+
+static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
+ struct platform_device *pdev, struct spi_master *master)
+{
+ struct resource *resource;
+ size_t bam_mem_size;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "spi_bam_physical");
+ if (!resource) {
+ dev_warn(&pdev->dev,
+ "%s: Missing spi_bam_physical entry in DT",
+ __func__);
+ return -ENXIO;
+ }
+
+ dd->bam.phys_addr = resource->start;
+ bam_mem_size = resource_size(resource);
+ dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+ bam_mem_size);
+ if (!dd->bam.base) {
+ dev_warn(&pdev->dev,
+ "%s: Failed to ioremap(spi_bam_physical)",
+ __func__);
+ return -ENXIO;
+ }
+
+ dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+ if (dd->bam.irq < 0) {
+ dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
+ __func__);
+ return -EINVAL;
+ }
+
+ dd->dma_init = msm_spi_bam_init;
+ dd->dma_teardown = msm_spi_bam_teardown;
+ return 0;
+}
+
static int __init msm_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -1926,21 +2463,39 @@
goto skip_dma_resources;
}
}
- resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (resource) {
- dd->rx_dma_chan = resource->start;
- dd->tx_dma_chan = resource->end;
- resource = platform_get_resource(pdev, IORESOURCE_DMA,
- 1);
- if (!resource) {
- rc = -ENXIO;
- goto err_probe_res;
- }
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ resource = platform_get_resource(pdev,
+ IORESOURCE_DMA, 0);
+ if (resource) {
+ dd->rx_dma_chan = resource->start;
+ dd->tx_dma_chan = resource->end;
+ resource = platform_get_resource(pdev,
+ IORESOURCE_DMA, 1);
+ if (!resource) {
+ rc = -ENXIO;
+ goto err_probe_res;
+ }
- dd->rx_dma_crci = resource->start;
- dd->tx_dma_crci = resource->end;
+ dd->rx_dma_crci = resource->start;
+ dd->tx_dma_crci = resource->end;
+ dd->use_dma = 1;
+ master->dma_alignment =
+ dma_get_cache_alignment();
+ dd->dma_init = msm_spi_dmov_init ;
+ dd->dma_teardown = msm_spi_dmov_teardown;
+ }
+ } else {
+ if (!dd->pdata->use_bam)
+ goto skip_dma_resources;
+
+ rc = msm_spi_bam_get_resources(dd, pdev, master);
+ if (rc) {
+ dev_warn(dd->dev,
+ "%s: Faild to get BAM resources",
+ __func__);
+ goto skip_dma_resources;
+ }
dd->use_dma = 1;
- master->dma_alignment = dma_get_cache_alignment();
}
}
@@ -1968,6 +2523,15 @@
goto err_probe_reqmem;
}
+ if (pdata && pdata->ver_reg_exists) {
+ enum msm_spi_qup_version ver =
+ msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+ if (dd->qup_ver != ver)
+ dev_warn(&pdev->dev,
+ "%s: HW version different then initially assumed by probe",
+ __func__);
+ }
+
if (pdata && pdata->rsl_id) {
struct remote_mutex_id rmid;
rmid.r_spinlock_id = pdata->rsl_id;
@@ -1984,7 +2548,7 @@
dd->use_rlock = 1;
dd->pm_lat = pdata->pm_lat;
pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ PM_QOS_DEFAULT_VALUE);
}
mutex_lock(&dd->core_lock);
@@ -2026,13 +2590,16 @@
}
pclk_enabled = 1;
- rc = msm_spi_configure_gsbi(dd, pdev);
- if (rc)
- goto err_probe_gsbi;
+ /* GSBI does not exist on B-family MSM chips */
+ if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+ rc = msm_spi_configure_gsbi(dd, pdev);
+ if (rc)
+ goto err_probe_gsbi;
+ }
msm_spi_calculate_fifo_size(dd);
if (dd->use_dma) {
- rc = msm_spi_init_dma(dd);
+ rc = dd->dma_init(dd);
if (rc)
goto err_probe_dma;
}
@@ -2091,7 +2658,7 @@
err_probe_reg_master:
err_probe_irq:
err_probe_state:
- msm_spi_teardown_dma(dd);
+ dd->dma_teardown(dd);
err_probe_dma:
err_probe_gsbi:
if (pclk_enabled)
@@ -2174,8 +2741,7 @@
spi_debugfs_exit(dd);
sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
- msm_spi_teardown_dma(dd);
-
+ dd->dma_teardown(dd);
clk_put(dd->clk);
clk_put(dd->pclk);
destroy_workqueue(dd->workqueue);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index a0dee34..62f1830 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -41,8 +41,13 @@
#define GSBI_CTRL_REG 0x0
#define GSBI_SPI_CONFIG 0x30
+/* B-family only registers */
#define QUP_HARDWARE_VER 0x0030
+#define QUP_HARDWARE_VER_2_1_1 0X20010001
#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG 0x200
+
#define QUP_ERROR_FLAGS 0x0308
#define SPI_CONFIG QSD_REG(0x0000) QUP_REG(0x0300)
@@ -73,6 +78,7 @@
#define SPI_NO_OUTPUT 0x00000040
#define SPI_CFG_LOOPBACK 0x00000100
#define SPI_CFG_N 0x0000001F
+#define SPI_EN_EXT_OUT_FLAG 0x00010000
/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS 0x00000800
@@ -148,8 +154,18 @@
/* Data Mover commands should be aligned to 64 bit(8 bytes) */
#define DM_BYTE_ALIGN 8
-#define SPI_QUP_VERSION_NONE 0x0
-#define SPI_QUP_VERSION_BFAM 0x2
+enum msm_spi_qup_version {
+ SPI_QUP_VERSION_NONE = 0x0,
+ SPI_QUP_VERSION_BFAM = 0x2,
+};
+
+enum msm_spi_pipe_direction {
+ SPI_BAM_CONSUMER_PIPE = 0x0,
+ SPI_BAM_PRODUCER_PIPE = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM 32
+#define SPI_MAX_TRFR_BTWN_RESETS ((64 * 1024) - 16) /* 64KB - 16byte */
static char const * const spi_rsrcs[] = {
"spi_clk",
@@ -231,6 +247,22 @@
};
#endif
+struct msm_spi_bam_pipe {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ bool teardown_required;
+};
+
+struct msm_spi_bam {
+ void __iomem *base;
+ u32 phys_addr;
+ u32 handle;
+ u32 irq;
+ struct msm_spi_bam_pipe prod;
+ struct msm_spi_bam_pipe cons;
+ bool deregister_required;
+};
+
struct msm_spi {
u8 *read_buf;
const u8 *write_buf;
@@ -244,8 +276,8 @@
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct completion transfer_complete;
- struct clk *clk;
- struct clk *pclk;
+ struct clk *clk; /* core clock */
+ struct clk *pclk; /* interface clock */
unsigned long mem_phys_addr;
size_t mem_size;
int input_fifo_size;
@@ -273,6 +305,9 @@
int tx_dma_crci;
int rx_dma_chan;
int rx_dma_crci;
+ int (*dma_init) (struct msm_spi *dd);
+ void (*dma_teardown) (struct msm_spi *dd);
+ struct msm_spi_bam bam;
/* Data Mover Commands */
struct spi_dmov_cmd *tx_dmov_cmd;
struct spi_dmov_cmd *rx_dmov_cmd;
@@ -321,7 +356,7 @@
int spi_gpios[ARRAY_SIZE(spi_rsrcs)];
/* SPI CS GPIOs for each slave */
struct spi_cs_gpio cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
- int qup_ver;
+ enum msm_spi_qup_version qup_ver;
int max_trfr_len;
};
@@ -333,7 +368,7 @@
enum msm_spi_state state);
static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
-static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
static inline void msm_spi_disable_irqs(struct msm_spi *dd)
@@ -385,7 +420,7 @@
static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
-static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
{
msm_spi_write_word_to_fifo(dd);
@@ -441,16 +476,18 @@
writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
}
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
-/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
{
u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
- msm_spi_add_configs(dd, &qup_config, bpw-1);
- writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
- dd->base + QUP_CONFIG);
+ msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+ writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
}
static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
@@ -482,12 +519,22 @@
static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
{
- writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+ writel_relaxed(
+ SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+ dd->base + SPI_ERROR_FLAGS_EN);
+ else
+ writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
}
static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
{
- writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+ writel_relaxed(
+ SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+ dd->base + SPI_ERROR_FLAGS);
+ else
+ writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
}
#endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index becd823..2161fac 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,3 +4,7 @@
obj-$(CONFIG_SPMI) += spmi.o spmi-resources.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
obj-$(CONFIG_MSM_QPNP_INT) += qpnp-int.o
+
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_SPMI) += spmi-dbgfs.o
+endif
diff --git a/drivers/spmi/spmi-dbgfs.c b/drivers/spmi/spmi-dbgfs.c
new file mode 100644
index 0000000..a23f945
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.c
@@ -0,0 +1,725 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * SPMI Debug-fs support.
+ *
+ * Hierarchy schema:
+ * /sys/kernel/debug/spmi
+ * /help -- static help text
+ * /spmi-0
+ * /spmi-0/address -- Starting register address for reads or writes
+ * /spmi-0/count -- number of registers to read (only on read)
+ * /spmi-0/data -- Triggers the SPMI formatted read.
+ * /spmi-0/data_raw -- Triggers the SPMI raw read or write
+ * /spmi-#
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/ctype.h>
+
+#define ADDR_LEN 6 /* 5 byte address + 1 space character */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 16 /* 16 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "spmi";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+
+/* Log buffer */
+struct spmi_log_buffer {
+ u32 rpos; /* Current 'read' position in buffer */
+ u32 wpos; /* Current 'write' position in buffer */
+ u32 len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* SPMI controller specific data (defaults copied into each file open) */
+struct spmi_ctrl_data {
+	u32 cnt;	/* default number of bytes to read */
+	u32 addr;	/* default 20-bit start address: SID + PID + reg offset */
+	struct list_head node;	/* entry in dbgfs_data.ctrl list */
+	struct spmi_controller *ctrl;
+};
+
+/* SPMI transaction parameters */
+struct spmi_trans {
+ u32 cnt; /* Number of bytes to read */
+ u32 addr; /* 20-bit address: SID + PID + Register offset */
+ u32 offset; /* Offset of last read data */
+ bool raw_data; /* Set to true for raw data dump */
+ struct spmi_controller *ctrl;
+ struct spmi_log_buffer *log; /* log buffer */
+};
+
+struct spmi_dbgfs {
+ struct dentry *root;
+ struct mutex lock;
+ struct list_head ctrl; /* List of spmi_ctrl_data nodes */
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct spmi_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .ctrl = LIST_HEAD_INIT(dbgfs_data.ctrl),
+ .help_msg = {
+ .data =
+"SPMI Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/spmi\n"
+" /help -- Static help text\n"
+" /spmi-0 -- Directory for SPMI bus 0\n"
+" /spmi-0/address -- Starting register address for reads or writes\n"
+" /spmi-0/count -- Number of registers to read (only used for reads)\n"
+" /spmi-0/data -- Initiates the SPMI read (formatted output)\n"
+" /spmi-0/data_raw -- Initiates the SPMI raw read or write\n"
+" /spmi-n -- Directory for SPMI bus n\n"
+"\n"
+"To perform SPMI read or write transactions, you need to first write the\n"
+"address of the slave device register to the 'address' file. For read\n"
+"transactions, the number of bytes to be read needs to be written to the\n"
+"'count' file.\n"
+"\n"
+"The 'address' file specifies the 20-bit address of a slave device register.\n"
+"The upper 4 bits 'address[19..16]' specify the slave identifier (SID) for\n"
+"the slave device. The lower 16 bits specify the slave register address.\n"
+"\n"
+"Reading from the 'data' file will initiate a SPMI read transaction starting\n"
+"from slave register 'address' for 'count' number of bytes.\n"
+"\n"
+"Writing to the 'data' file will initiate a SPMI write transaction starting\n"
+"from slave register 'address'. The number of registers written to will\n"
+"match the number of bytes written to the 'data' file.\n"
+"\n"
+"Example: Read 4 bytes starting at register address 0x1234 for SID 2\n"
+"\n"
+"echo 0x21234 > address\n"
+"echo 4 > count\n"
+"cat data\n"
+"\n"
+"Example: Write 3 bytes starting at register address 0x1008 for SID 1\n"
+"\n"
+"echo 0x11008 > address\n"
+"echo 0x01 0x02 0x03 > data\n"
+"\n"
+"Note that the count file is not used for writes. Since 3 bytes are\n"
+"written to the 'data' file, then 3 bytes will be written across the\n"
+"SPMI bus.\n\n",
+ },
+};
+
+/**
+ * spmi_dfs_open: common open helper for the 'data' and 'data_raw' files
+ * @ctrl_data: per-controller transaction defaults (addr, count, ctrl)
+ * @file: file being opened; private_data receives the new spmi_trans
+ *
+ * Allocates a per-open transaction snapshot plus a log buffer so that
+ * concurrent opens on the same controller do not share read state.
+ * Returns 0 on success, -EINVAL or -ENOMEM on failure (in which case
+ * file->private_data is left untouched).
+ */
+static int spmi_dfs_open(struct spmi_ctrl_data *ctrl_data, struct file *file)
+{
+	struct spmi_log_buffer *log;
+	struct spmi_trans *trans;
+
+	size_t logbufsize = SZ_4K;
+
+	if (!ctrl_data) {
+		pr_err("No SPMI controller data\n");
+		return -EINVAL;
+	}
+
+	/* Per file "transaction" data */
+	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+
+	if (!trans) {
+		pr_err("Unable to allocate memory for transaction data\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate log buffer */
+	log = kzalloc(logbufsize, GFP_KERNEL);
+
+	if (!log) {
+		kfree(trans);
+		pr_err("Unable to allocate memory for log buffer\n");
+		return -ENOMEM;
+	}
+
+	log->rpos = 0;
+	log->wpos = 0;
+	/* usable data area = allocation minus the header fields */
+	log->len = logbufsize - sizeof(*log);
+
+	/* snapshot the controller defaults for this open */
+	trans->log = log;
+	trans->cnt = ctrl_data->cnt;
+	trans->addr = ctrl_data->addr;
+	trans->ctrl = ctrl_data->ctrl;
+	trans->offset = trans->addr;
+
+	file->private_data = trans;
+	return 0;
+}
+
+/* open() for the formatted 'data' file: inode private data holds the
+ * controller's spmi_ctrl_data set up in spmi_dfs_add_controller(). */
+static int spmi_dfs_data_open(struct inode *inode, struct file *file)
+{
+	struct spmi_ctrl_data *ctrl_data = inode->i_private;
+	return spmi_dfs_open(ctrl_data, file);
+}
+
+/* open() for the 'data_raw' file: same as 'data' but flags the
+ * transaction for raw (unformatted) dumps. */
+static int spmi_dfs_raw_data_open(struct inode *inode, struct file *file)
+{
+	int rc;
+	struct spmi_trans *trans;
+	struct spmi_ctrl_data *ctrl_data = inode->i_private;
+
+	rc = spmi_dfs_open(ctrl_data, file);
+	/*
+	 * On failure spmi_dfs_open() never set file->private_data, so
+	 * dereferencing it here would crash; bail out first.
+	 */
+	if (rc)
+		return rc;
+
+	trans = file->private_data;
+	trans->raw_data = true;
+	return 0;
+}
+
+/* release() for both data files: frees the per-open transaction state
+ * allocated in spmi_dfs_open(). */
+static int spmi_dfs_close(struct inode *inode, struct file *file)
+{
+	struct spmi_trans *trans = file->private_data;
+
+	if (trans && trans->log) {
+		file->private_data = NULL;
+		kfree(trans->log);
+		kfree(trans);
+	}
+
+	return 0;
+}
+
+/**
+ * spmi_read_data: reads data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: buffer to store the data read.
+ * @offset: SPMI address offset to start reading from.
+ * @cnt: The number of bytes to read.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_read_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+	int ret = 0;
+	int len;
+	uint8_t sid;
+	uint16_t addr;
+
+	while (cnt > 0) {
+		/* Decode 20-bit address: bits [19..16] SID, [15..0] register */
+		sid = (offset >> 16) & 0xF;
+		addr = offset & 0xFFFF;
+		/* Split into chunks of at most MAX_REG_PER_TRANSACTION */
+		len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+		ret = spmi_ext_register_readl(ctrl, sid, addr, buf, len);
+		if (ret < 0) {
+			pr_err("SPMI read failed, err = %d\n", ret);
+			goto done;
+		}
+
+		cnt -= len;
+		buf += len;
+		offset += len;
+	}
+
+done:
+	return ret;
+}
+
+/**
+ * spmi_write_data: writes data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: data to be written.
+ * @offset: SPMI address offset to start writing to.
+ * @cnt: The number of bytes to write.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_write_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+	int ret = 0;
+	int len;
+	uint8_t sid;
+	uint16_t addr;
+
+	while (cnt > 0) {
+		/* Decode 20-bit address: bits [19..16] SID, [15..0] register */
+		sid = (offset >> 16) & 0xF;
+		addr = offset & 0xFFFF;
+		/* Split into chunks of at most MAX_REG_PER_TRANSACTION */
+		len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+		ret = spmi_ext_register_writel(ctrl, sid, addr, buf, len);
+		if (ret < 0) {
+			pr_err("SPMI write failed, err = %d\n", ret);
+			goto done;
+		}
+
+		cnt -= len;
+		buf += len;
+		offset += len;
+	}
+
+done:
+	return ret;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'. A return of 0 means the log buffer
+ * is full.
+ */
+static int print_to_log(struct spmi_log_buffer *log, const char *fmt, ...)
+{
+	va_list args;
+	int cnt;
+	char *buf = &log->data[log->wpos];
+	size_t size = log->len - log->wpos;
+
+	va_start(args, fmt);
+	/* vscnprintf never writes past 'size' bytes */
+	cnt = vscnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	log->wpos += cnt;
+	return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bits SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus. When the cnt reaches 0, all requested
+ * bytes have been read.
+ *
+ * Returns the number of characters written for the last item, or 0 when
+ * the log buffer is full or the bus read failed (caller stops looping).
+ */
+static int
+write_next_line_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+	int i, j;
+	u8 data[ITEMS_PER_LINE];
+	struct spmi_log_buffer *log = trans->log;
+
+	int cnt = 0;
+	/* distance of 'offset' from the previous 16-byte boundary */
+	int padding = offset % ITEMS_PER_LINE;
+	int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+	int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+		goto done;
+
+	/* Read the desired number of "items" */
+	if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+		goto done;
+
+	*pcnt -= items_to_read;
+
+	/* Each line starts with the aligned offset (20-bit address) */
+	cnt = print_to_log(log, "%5.5X ", offset & 0xffff0);
+	if (cnt == 0)
+		goto done;
+
+	/* If the offset is unaligned, add padding to right justify items */
+	for (i = 0; i < padding; ++i) {
+		cnt = print_to_log(log, "-- ");
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* Log the data items; 'i' continues from the padding loop so the
+	 * line stays column-aligned */
+	for (j = 0; i < items_to_log; ++i, ++j) {
+		cnt = print_to_log(log, "%2.2X ", data[j]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * write_raw_data_to_log: Writes one chunk of raw (unformatted) data into
+ * the log buffer as space-separated "0xXX" tokens, without the address
+ * column or alignment padding of write_next_line_to_log().
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bits SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus. When the cnt reaches 0, all requested
+ * bytes have been read.
+ */
+static int
+write_raw_data_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+	u8 data[16];
+	struct spmi_log_buffer *log = trans->log;
+
+	int i;
+	int cnt = 0;
+	int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+
+	/* Buffer needs enough space for an entire line (80 columns) */
+	if ((log->len - log->wpos) < 80)
+		goto done;
+
+	/* Read the desired number of "items" */
+	if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+		goto done;
+
+	*pcnt -= items_to_read;
+
+	/* Log the data items */
+	for (i = 0; i < items_to_read; ++i) {
+		cnt = print_to_log(log, "0x%2.2X ", data[i]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * get_log_data - reads data across the SPMI bus and saves to the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ *
+ * Returns the number of "items" read; 0 when nothing could be read
+ * (bus errors inside the formatters are not propagated as error codes).
+ */
+static int get_log_data(struct spmi_trans *trans)
+{
+	int cnt;
+	int last_cnt;
+	int items_read;
+	int total_items_read = 0;
+	u32 offset = trans->offset;
+	size_t item_cnt = trans->cnt;
+	struct spmi_log_buffer *log = trans->log;
+	int (*write_to_log)(struct spmi_trans *, int, size_t *);
+
+	if (item_cnt == 0)
+		return 0;
+
+	/* Pick the line formatter matching this file's open mode */
+	if (trans->raw_data)
+		write_to_log = write_raw_data_to_log;
+	else
+		write_to_log = write_next_line_to_log;
+
+	/* Reset the log buffer 'pointers' */
+	log->wpos = log->rpos = 0;
+
+	/* Keep reading data until the log is full */
+	do {
+		last_cnt = item_cnt;
+		cnt = write_to_log(trans, offset, &item_cnt);
+		/* the formatter decremented item_cnt by what it consumed */
+		items_read = last_cnt - item_cnt;
+		offset += items_read;
+		total_items_read += items_read;
+	} while (cnt && item_cnt > 0);
+
+	/* Adjust the transaction offset and count */
+	trans->cnt = item_cnt;
+	trans->offset += total_items_read;
+
+	return total_items_read;
+}
+
+/**
+ * spmi_dfs_reg_write: write user's byte array (coded as string) over SPMI.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes consumed, or negative error value
+ */
+static ssize_t spmi_dfs_reg_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int bytes_read;
+	int data;
+	int pos = 0;
+	int cnt = 0;
+	int rc;
+	u8 *values;
+	ssize_t ret;
+
+	struct spmi_trans *trans = file->private_data;
+	u32 offset = trans->offset;
+
+	/* Make a copy of the user data */
+	char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+		pr_err("failed to copy data from user\n");
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	/* 'ret' bytes at the tail were not copied; keep only what we got */
+	count -= ret;
+	*ppos += count;
+	kbuf[count] = '\0';
+
+	/* Override the text buffer with the raw data (cnt <= count, so the
+	 * parsed bytes always fit in front of the unparsed text) */
+	values = kbuf;
+
+	/* Parse the data in the buffer. It should be a string of numbers */
+	while (sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+		pos += bytes_read;
+		values[cnt++] = data & 0xff;
+	}
+
+	if (!cnt) {
+		/* returning 0 from write() would make userspace loop */
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	/*
+	 * Perform the SPMI write(s). Keep the (negative) error in a signed
+	 * int; the original code stored it in a size_t and printed it with
+	 * %zu, yielding garbage diagnostics.
+	 */
+	rc = spmi_write_data(trans->ctrl, values, offset, cnt);
+	if (rc) {
+		pr_err("SPMI write failed, err = %d\n", rc);
+		ret = rc;
+	} else {
+		ret = count;
+		trans->offset += cnt;
+	}
+
+free_buf:
+	kfree(kbuf);
+	return ret;
+}
+
+/**
+ * spmi_dfs_reg_read: reads value(s) over SPMI and fill user's buffer a
+ * byte array (coded as string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t spmi_dfs_reg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct spmi_trans *trans = file->private_data;
+	struct spmi_log_buffer *log = trans->log;
+	size_t ret;
+	size_t len;
+
+	/* Is the log buffer empty? Refill it from the bus if so. */
+	if (log->rpos >= log->wpos) {
+		if (get_log_data(trans) <= 0)
+			return 0;
+	}
+
+	/* wpos/rpos are u32 while count is size_t; min_t() avoids the
+	 * mixed-type comparison that kernel min() rejects on 64-bit */
+	len = min_t(size_t, count, log->wpos - log->rpos);
+
+	ret = copy_to_user(buf, &log->data[log->rpos], len);
+	if (ret == len) {
+		pr_err("error copy SPMI register values to user\n");
+		return -EFAULT;
+	}
+
+	/* 'ret' is the number of bytes not copied */
+	len -= ret;
+
+	*ppos += len;
+	log->rpos += len;
+	return len;
+}
+
+/* file_operations for the formatted 'data' debugfs entry */
+static const struct file_operations spmi_dfs_reg_fops = {
+	.open		= spmi_dfs_data_open,
+	.release	= spmi_dfs_close,
+	.read		= spmi_dfs_reg_read,
+	.write		= spmi_dfs_reg_write,
+};
+
+/* file_operations for the unformatted 'data_raw' debugfs entry */
+static const struct file_operations spmi_dfs_raw_data_fops = {
+	.open		= spmi_dfs_raw_data_open,
+	.release	= spmi_dfs_close,
+	.read		= spmi_dfs_reg_read,
+	.write		= spmi_dfs_reg_write,
+};
+
+/**
+ * spmi_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *spmi_dfs_create_fs(void)
+{
+	struct dentry *root, *file;
+
+	pr_debug("Creating SPMI debugfs file-system\n");
+	root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+	/*
+	 * debugfs_create_dir() returns NULL on allocation failure and
+	 * ERR_PTR(-ENODEV) when debugfs is not compiled in; both must be
+	 * caught. The original (int) cast of the pointer is also invalid
+	 * on 64-bit builds -- use PTR_ERR() instead.
+	 */
+	if (IS_ERR_OR_NULL(root)) {
+		pr_err("Error creating top level directory err:%ld",
+			(long)PTR_ERR(root));
+		if (PTR_ERR(root) == -ENODEV)
+			pr_err("debugfs is not enabled in the kernel");
+		return NULL;
+	}
+
+	dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+	file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+	if (!file) {
+		pr_err("error creating help entry\n");
+		goto err_remove_fs;
+	}
+	return root;
+
+err_remove_fs:
+	debugfs_remove_recursive(root);
+	return NULL;
+}
+
+/**
+ * spmi_dfs_get_root: return a pointer to SPMI debugfs root directory.
+ * @brief return a pointer to the existing directory, or if no root
+ * directory exists then create one. Directory is created with file that
+ * configures SPMI transaction, namely: sid, address, and count.
+ * @returns valid pointer on success or NULL
+ */
+struct dentry *spmi_dfs_get_root(void)
+{
+	/*
+	 * Fast path: lock-free read of the root pointer.
+	 * NOTE(review): double-checked locking without a memory barrier --
+	 * presumably safe here because all callers run in process context
+	 * during controller registration; confirm if that changes.
+	 */
+	if (dbgfs_data.root)
+		return dbgfs_data.root;
+
+	if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+		return NULL;
+	/* critical section */
+	if (!dbgfs_data.root) { /* double checking idiom */
+		dbgfs_data.root = spmi_dfs_create_fs();
+	}
+	mutex_unlock(&dbgfs_data.lock);
+	return dbgfs_data.root;
+}
+
+/*
+ * spmi_dfs_add_controller: adds new spmi controller entry
+ * @ctrl: controller to expose under /sys/kernel/debug/spmi/<name>/
+ * @return zero on success, negative errno otherwise
+ */
+int spmi_dfs_add_controller(struct spmi_controller *ctrl)
+{
+	struct dentry *dir;
+	struct dentry *root;
+	struct dentry *file;
+	struct spmi_ctrl_data *ctrl_data;
+
+	pr_debug("Adding controller %s\n", ctrl->dev.kobj.name);
+	root = spmi_dfs_get_root();
+	if (!root)
+		return -ENOENT;
+
+	/* Allocate transaction data for the controller */
+	ctrl_data = kzalloc(sizeof(*ctrl_data), GFP_KERNEL);
+	if (!ctrl_data)
+		return -ENOMEM;
+
+	dir = debugfs_create_dir(ctrl->dev.kobj.name, root);
+	if (!dir) {
+		pr_err("Error creating entry for spmi controller %s\n",
+						ctrl->dev.kobj.name);
+		goto err_create_dir_failed;
+	}
+
+	/* Default transaction: read one byte starting at address 0 */
+	ctrl_data->cnt = 1;
+	ctrl_data->ctrl = ctrl;
+
+	file = debugfs_create_u32("count", DFS_MODE, dir, &ctrl_data->cnt);
+	if (!file) {
+		pr_err("error creating 'count' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_x32("address", DFS_MODE, dir, &ctrl_data->addr);
+	if (!file) {
+		pr_err("error creating 'address' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data", DFS_MODE, dir, ctrl_data,
+							&spmi_dfs_reg_fops);
+	if (!file) {
+		pr_err("error creating 'data' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data_raw", DFS_MODE, dir, ctrl_data,
+						&spmi_dfs_raw_data_fops);
+	if (!file) {
+		/* was a copy-paste of the 'data' message */
+		pr_err("error creating 'data_raw' entry\n");
+		goto err_remove_fs;
+	}
+
+	list_add(&ctrl_data->node, &dbgfs_data.ctrl);
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(dir);
+err_create_dir_failed:
+	kfree(ctrl_data);
+	return -ENOMEM;
+}
+
+/* Frees every spmi_ctrl_data node on @head (called at module exit with
+ * dbgfs_data.lock held by the caller). */
+static void __exit spmi_dfs_delete_all_ctrl(struct list_head *head)
+{
+	struct spmi_ctrl_data *ctrl_data, *tmp;
+
+	/* _safe variant: each node is freed while iterating */
+	list_for_each_entry_safe(ctrl_data, tmp, head, node) {
+		list_del(&ctrl_data->node);
+		kfree(ctrl_data);
+	}
+}
+
+/* Module teardown: remove the debugfs tree and free all controller data. */
+static void __exit spmi_dfs_destroy(void)
+{
+	pr_debug("de-initializing spmi debugfs ...");
+	/*
+	 * NOTE(review): if the lock acquisition is interrupted, the debugfs
+	 * tree and the controller list are leaked on module unload --
+	 * confirm whether an uninterruptible mutex_lock() is intended here.
+	 */
+	if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+		return;
+	if (dbgfs_data.root) {
+		debugfs_remove_recursive(dbgfs_data.root);
+		dbgfs_data.root = NULL;
+		spmi_dfs_delete_all_ctrl(&dbgfs_data.ctrl);
+	}
+	mutex_unlock(&dbgfs_data.lock);
+}
+
+module_exit(spmi_dfs_destroy);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_debug_fs");
diff --git a/drivers/spmi/spmi-dbgfs.h b/drivers/spmi/spmi-dbgfs.h
new file mode 100644
index 0000000..0baa4db
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SPMI_DBGFS_H
+#define _SPMI_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+/* Registers @ctrl under /sys/kernel/debug/spmi/; returns 0 on success. */
+int spmi_dfs_add_controller(struct spmi_controller *ctrl);
+#else
+/*
+ * Must be 'static inline': a plain function definition in a header would
+ * produce a duplicate symbol in every translation unit that includes it.
+ */
+static inline int spmi_dfs_add_controller(struct spmi_controller *ctrl)
+{
+	return 0;
+}
+#endif
+
+#endif /* _SPMI_DBGFS_H */
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 914df95..ad58240 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include "spmi-dbgfs.h"
+
struct spmii_boardinfo {
struct list_head list;
struct spmi_boardinfo board_info;
@@ -755,6 +757,7 @@
list_add_tail(&ctrl->list, &spmi_ctrl_list);
mutex_unlock(&board_lock);
+ spmi_dfs_add_controller(ctrl);
return 0;
exit:
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
index ec04369..4568933 100644
--- a/drivers/thermal/pm8xxx-tm.c
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -33,29 +33,32 @@
#include <linux/msm_adc.h>
/* Register TEMP_ALARM_CTRL bits */
-#define TEMP_ALARM_CTRL_ST3_SD 0x80
-#define TEMP_ALARM_CTRL_ST2_SD 0x40
-#define TEMP_ALARM_CTRL_STATUS_MASK 0x30
-#define TEMP_ALARM_CTRL_STATUS_SHIFT 4
-#define TEMP_ALARM_CTRL_THRESH_MASK 0x0C
-#define TEMP_ALARM_CTRL_THRESH_SHIFT 2
-#define TEMP_ALARM_CTRL_OVRD_ST3 0x02
-#define TEMP_ALARM_CTRL_OVRD_ST2 0x01
-#define TEMP_ALARM_CTRL_OVRD_MASK 0x03
+#define TEMP_ALARM_CTRL_ST3_SD 0x80
+#define TEMP_ALARM_CTRL_ST2_SD 0x40
+#define TEMP_ALARM_CTRL_STATUS_MASK 0x30
+#define TEMP_ALARM_CTRL_STATUS_SHIFT 4
+#define TEMP_ALARM_CTRL_THRESH_MASK 0x0C
+#define TEMP_ALARM_CTRL_THRESH_SHIFT 2
+#define TEMP_ALARM_CTRL_OVRD_ST3 0x02
+#define TEMP_ALARM_CTRL_OVRD_ST2 0x01
+#define TEMP_ALARM_CTRL_OVRD_MASK 0x03
-#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
-#define TEMP_STAGE_HYSTERESIS 2000
+#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS 2000
-#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
-#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
/* Register TEMP_ALARM_PWM bits */
-#define TEMP_ALARM_PWM_EN_MASK 0xC0
-#define TEMP_ALARM_PWM_EN_SHIFT 6
-#define TEMP_ALARM_PWM_PER_PRE_MASK 0x38
-#define TEMP_ALARM_PWM_PER_PRE_SHIFT 3
-#define TEMP_ALARM_PWM_PER_DIV_MASK 0x07
-#define TEMP_ALARM_PWM_PER_DIV_SHIFT 0
+#define TEMP_ALARM_PWM_EN_MASK 0xC0
+#define TEMP_ALARM_PWM_EN_NEVER 0x00
+#define TEMP_ALARM_PWM_EN_SLEEP_B 0x40
+#define TEMP_ALARM_PWM_EN_PWM 0x80
+#define TEMP_ALARM_PWM_EN_ALWAYS 0xC0
+#define TEMP_ALARM_PWM_PER_PRE_MASK 0x38
+#define TEMP_ALARM_PWM_PER_PRE_SHIFT 3
+#define TEMP_ALARM_PWM_PER_DIV_MASK 0x07
+#define TEMP_ALARM_PWM_PER_DIV_SHIFT 0
/* Trips: from critical to less critical */
#define TRIP_STAGE3 0
@@ -516,16 +519,15 @@
return rc;
/*
- * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
- * helps cut down on the number of unnecessary interrupts fired when
- * changing between thermal stages. Also, Enable the over temperature
- * PWM whenever the PMIC is enabled.
+ * Set the PMIC temperature alarm module to be always on. This ensures
+ * that die temperature monitoring is active even if CXO is disabled
+ * (i.e. when sleep_b is low). This is necessary since CXO can be
+ * disabled while the system is still heavily loaded. Also, using
+ * the alway-on instead of PWM-enabled configurations ensures that the
+ * die temperature can be measured by the PMIC ADC without reconfiguring
+ * the temperature alarm module first.
*/
- reg = (1 << TEMP_ALARM_PWM_EN_SHIFT)
- | (3 << TEMP_ALARM_PWM_PER_PRE_SHIFT)
- | (3 << TEMP_ALARM_PWM_PER_DIV_SHIFT);
-
- rc = pm8xxx_tm_write_pwm(chip, reg);
+ rc = pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_ALWAYS);
return rc;
}
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e6f5bf5..5e7ab9f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -848,8 +848,7 @@
},
};
-#define UART_NR ARRAY_SIZE(msm_uart_ports)
-
+#define UART_NR 256
static inline struct uart_port * get_port_from_line(unsigned int line)
{
return &msm_uart_ports[line].uart;
@@ -1002,9 +1001,7 @@
struct resource *resource;
struct uart_port *port;
int irq;
-#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
struct msm_serial_platform_data *pdata = pdev->dev.platform_data;
-#endif
if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
return -ENXIO;
@@ -1057,6 +1054,8 @@
#endif
pm_runtime_enable(port->dev);
+ if (pdata != NULL && pdata->userid && pdata->userid <= UART_NR)
+ port->line = pdata->userid;
return uart_add_one_port(&msm_uart_driver, port);
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 84cd3e7..7a0e32b 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -391,8 +391,6 @@
struct msm_hs_port *msm_uport;
struct device *dev;
- struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
-
if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
@@ -402,10 +400,6 @@
msm_uport = &q_uart_port[pdev->id];
dev = msm_uport->uport.dev;
- if (pdata && pdata->gpio_config)
- if (pdata->gpio_config(0))
- dev_err(dev, "GPIO config error\n");
-
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
debugfs_remove(msm_uport->loopback_dir);
@@ -1646,6 +1640,9 @@
unsigned long flags;
unsigned int data;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ const struct msm_serial_hs_platform_data *pdata =
+ pdev->dev.platform_data;
struct circ_buf *tx_buf = &uport->state->xmit;
struct msm_hs_tx *tx = &msm_uport->tx;
@@ -1665,6 +1662,10 @@
return ret;
}
+ if (pdata && pdata->gpio_config)
+ if (unlikely(pdata->gpio_config(1)))
+ dev_err(uport->dev, "Cannot configure gpios\n");
+
/* Set auto RFR Level */
data = msm_hs_read(uport, UARTDM_MR1_ADDR);
data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
@@ -1945,10 +1946,6 @@
if (unlikely(msm_uport->wakeup.irq < 0))
return -ENXIO;
- if (pdata->gpio_config)
- if (unlikely(pdata->gpio_config(1)))
- dev_err(uport->dev, "Cannot configure"
- "gpios\n");
}
resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -2086,6 +2083,9 @@
unsigned int data;
unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ const struct msm_serial_hs_platform_data *pdata =
+ pdev->dev.platform_data;
if (msm_uport->tx.dma_in_flight) {
spin_lock_irqsave(&uport->lock, flags);
@@ -2148,6 +2148,10 @@
free_irq(uport->irq, msm_uport);
if (use_low_power_wakeup(msm_uport))
free_irq(msm_uport->wakeup.irq, msm_uport);
+
+ if (pdata && pdata->gpio_config)
+ if (pdata->gpio_config(0))
+ dev_err(uport->dev, "GPIO config error\n");
}
static void __exit msm_serial_hs_exit(void)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 2f3f83d..cc9ffaa 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -1369,9 +1369,12 @@
else
line = pdev->id;
- /* Use line number from device tree if present */
- if (pdev->dev.of_node)
- of_property_read_u32(pdev->dev.of_node, "cell-index", &line);
+ /* Use line number from device tree alias if present */
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ line = ret;
+ }
if (unlikely(line < 0 || line >= UART_NR))
return -ENXIO;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 494ec49..55ff980 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1357,8 +1357,14 @@
{
struct usb_device *udev = to_usb_device(dev);
- if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
- return 0;
+ if (udev->bus->skip_resume) {
+ if (udev->state == USB_STATE_SUSPENDED) {
+ return 0;
+ } else {
+ dev_err(dev, "abort suspend\n");
+ return -EBUSY;
+ }
+ }
unbind_no_pm_drivers_interfaces(udev);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4073fc8..7430e5a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -36,6 +36,7 @@
#include <linux/power_supply.h>
#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
#include <mach/msm_xo.h>
#include <mach/msm_bus.h>
@@ -141,8 +142,6 @@
struct regulator *hsusb_vddcx;
struct regulator *ssusb_1p8;
struct regulator *ssusb_vddcx;
- enum usb_vdd_type ss_vdd_type;
- enum usb_vdd_type hs_vdd_type;
struct dwc3_ext_xceiv ext_xceiv;
bool resume_pending;
atomic_t pm_suspended;
@@ -162,6 +161,9 @@
unsigned int online;
unsigned int host_mode;
unsigned int current_max;
+ unsigned int vdd_no_vol_level;
+ unsigned int vdd_low_vol_level;
+ unsigned int vdd_high_vol_level;
bool vbus_active;
};
@@ -177,23 +179,6 @@
#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
-#define USB_PHY_VDD_DIG_VOL_NONE 0 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-
-static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
- { /* VDD_CX CORNER Voting */
- [VDD_NONE] = RPM_VREG_CORNER_NONE,
- [VDD_MIN] = RPM_VREG_CORNER_NOMINAL,
- [VDD_MAX] = RPM_VREG_CORNER_HIGH,
- },
- { /* VDD_CX Voltage Voting */
- [VDD_NONE] = USB_PHY_VDD_DIG_VOL_NONE,
- [VDD_MIN] = USB_PHY_VDD_DIG_VOL_MIN,
- [VDD_MAX] = USB_PHY_VDD_DIG_VOL_MAX,
- },
-};
-
static struct dwc3_msm *context;
static u64 dwc3_msm_dma_mask = DMA_BIT_MASK(64);
@@ -858,12 +843,11 @@
/* HSPHY */
static int dwc3_hsusb_config_vddcx(int high)
{
- int min_vol, ret;
+ int min_vol, max_vol, ret;
struct dwc3_msm *dwc = context;
- enum usb_vdd_type vdd_type = context->hs_vdd_type;
- int max_vol = vdd_val[vdd_type][VDD_MAX];
- min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+ max_vol = dwc->vdd_high_vol_level;
+ min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
ret = regulator_set_voltage(dwc->hsusb_vddcx, min_vol, max_vol);
if (ret) {
dev_err(dwc->dev, "unable to set voltage for HSUSB_VDDCX\n");
@@ -983,12 +967,11 @@
/* SSPHY */
static int dwc3_ssusb_config_vddcx(int high)
{
- int min_vol, ret;
+ int min_vol, max_vol, ret;
struct dwc3_msm *dwc = context;
- enum usb_vdd_type vdd_type = context->ss_vdd_type;
- int max_vol = vdd_val[vdd_type][VDD_MAX];
- min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+ max_vol = dwc->vdd_high_vol_level;
+ min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
ret = regulator_set_voltage(dwc->ssusb_vddcx, min_vol, max_vol);
if (ret) {
dev_err(dwc->dev, "unable to set voltage for SSUSB_VDDCX\n");
@@ -1250,6 +1233,14 @@
return 0;
}
+ if (cancel_delayed_work_sync(&mdwc->chg_work))
+ dev_dbg(mdwc->dev, "%s: chg_work was pending\n", __func__);
+ if (mdwc->chg_state != USB_CHG_STATE_DETECTED) {
+ /* charger detection wasn't complete; re-init flags */
+ mdwc->chg_state = USB_CHG_STATE_UNDEFINED;
+ mdwc->charger.chg_type = DWC3_INVALID_CHARGER;
+ }
+
/* Sequence to put hardware in low power state:
* 1. Set OTGDISABLE to disable OTG block in HSPHY (saves power)
* 2. Clear charger detection control fields
@@ -1615,6 +1606,8 @@
struct resource *res;
void __iomem *tcsr;
int ret = 0;
+ int len = 0;
+ u32 tmp[3];
msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
if (!msm) {
@@ -1689,19 +1682,26 @@
}
clk_prepare_enable(msm->ref_clk);
+
+ of_get_property(node, "qcom,vdd-voltage-level", &len);
+ if (len == sizeof(tmp)) {
+ of_property_read_u32_array(node, "qcom,vdd-voltage-level",
+ tmp, len/sizeof(*tmp));
+ msm->vdd_no_vol_level = tmp[0];
+ msm->vdd_low_vol_level = tmp[1];
+ msm->vdd_high_vol_level = tmp[2];
+ } else {
+ dev_err(&pdev->dev, "no qcom,vdd-voltage-level property\n");
+ ret = -EINVAL;
+ goto disable_ref_clk;
+ }
+
/* SS PHY */
- msm->ss_vdd_type = VDDCX_CORNER;
msm->ssusb_vddcx = devm_regulator_get(&pdev->dev, "ssusb_vdd_dig");
if (IS_ERR(msm->ssusb_vddcx)) {
- msm->ssusb_vddcx = devm_regulator_get(&pdev->dev,
- "SSUSB_VDDCX");
- if (IS_ERR(msm->ssusb_vddcx)) {
- dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
- ret = PTR_ERR(msm->ssusb_vddcx);
- goto disable_ref_clk;
- }
- msm->ss_vdd_type = VDDCX;
- dev_dbg(&pdev->dev, "ss_vdd_type: VDDCX\n");
+ dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
+ ret = PTR_ERR(msm->ssusb_vddcx);
+ goto disable_ref_clk;
}
ret = dwc3_ssusb_config_vddcx(1);
@@ -1729,18 +1729,11 @@
}
/* HS PHY */
- msm->hs_vdd_type = VDDCX_CORNER;
msm->hsusb_vddcx = devm_regulator_get(&pdev->dev, "hsusb_vdd_dig");
if (IS_ERR(msm->hsusb_vddcx)) {
- msm->hsusb_vddcx = devm_regulator_get(&pdev->dev,
- "HSUSB_VDDCX");
- if (IS_ERR(msm->hsusb_vddcx)) {
- dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
- ret = PTR_ERR(msm->ssusb_vddcx);
- goto disable_ss_ldo;
- }
- msm->hs_vdd_type = VDDCX;
- dev_dbg(&pdev->dev, "hs_vdd_type: VDDCX\n");
+ dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
+ ret = PTR_ERR(msm->hsusb_vddcx);
+ goto disable_ss_ldo;
}
ret = dwc3_hsusb_config_vddcx(1);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 7b672c4..fab443c 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -640,14 +640,9 @@
}
}
} else {
- if (charger) {
- if (charger->chg_type == DWC3_INVALID_CHARGER)
- charger->start_detection(dotg->charger,
- false);
- else
- charger->chg_type =
- DWC3_INVALID_CHARGER;
- }
+ if (charger)
+ charger->start_detection(dotg->charger, false);
+
dwc3_otg_set_power(phy, 0);
dev_dbg(phy->dev, "No device, trying to suspend\n");
pm_runtime_put_sync(phy->dev);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f903dd..3679191 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1527,6 +1527,13 @@
} else {
ret = dwc3_gadget_run_stop(dwc, 0);
}
+ } else if (dwc->gadget_driver && !dwc->softconnect &&
+ !dwc->vbus_active) {
+ if (dwc->gadget_driver->disconnect) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc->gadget_driver->disconnect(&dwc->gadget);
+ return 0;
+ }
}
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 8f68234..aca2af3 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
#include <mach/usbdiag.h>
@@ -427,6 +428,7 @@
struct diag_context *ctxt = ch->priv_usb;
unsigned long flags;
struct usb_request *req;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
if (!ctxt)
return -ENODEV;
@@ -456,7 +458,9 @@
spin_lock_irqsave(&ctxt->lock, flags);
list_add_tail(&req->list, &ctxt->read_pool);
spin_unlock_irqrestore(&ctxt->lock, flags);
- ERROR(ctxt->cdev, "%s: cannot queue"
+ /* 1 error message for every 10 sec */
+ if (__ratelimit(&rl))
+ ERROR(ctxt->cdev, "%s: cannot queue"
" read request\n", __func__);
return -EIO;
}
@@ -483,6 +487,7 @@
struct diag_context *ctxt = ch->priv_usb;
unsigned long flags;
struct usb_request *req = NULL;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
if (!ctxt)
return -ENODEV;
@@ -512,7 +517,9 @@
spin_lock_irqsave(&ctxt->lock, flags);
list_add_tail(&req->list, &ctxt->write_pool);
spin_unlock_irqrestore(&ctxt->lock, flags);
- ERROR(ctxt->cdev, "%s: cannot queue"
+ /* 1 error message for every 10 sec */
+ if (__ratelimit(&rl))
+ ERROR(ctxt->cdev, "%s: cannot queue"
" read request\n", __func__);
return -EIO;
}
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 6b9295b..85240ef 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -941,23 +941,23 @@
pr_debug("dev:%p port#%d\n", dev, dev->port_num);
- spin_lock(&dev->lock);
- if (!dev->is_open) {
- pr_err("mbim file handler %p is not open", dev);
- spin_unlock(&dev->lock);
- return;
- }
-
cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
if (!cpkt) {
pr_err("Unable to allocate ctrl pkt\n");
- spin_unlock(&dev->lock);
return;
}
pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
memcpy(cpkt->buf, req->buf, len);
+ spin_lock(&dev->lock);
+ if (!dev->is_open) {
+ pr_err("mbim file handler %p is not open", dev);
+ spin_unlock(&dev->lock);
+ mbim_free_ctrl_pkt(cpkt);
+ return;
+ }
+
list_add_tail(&cpkt->list, &dev->cpkt_req_q);
spin_unlock(&dev->lock);
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 4357e0d..79aac27 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -18,6 +18,7 @@
#include <linux/spinlock.h>
#include <mach/usb_gadget_xport.h>
+#include <mach/usb_bam.h>
#include "u_rmnet.h"
#include "gadget_chips.h"
@@ -49,6 +50,9 @@
struct list_head cpkt_resp_q;
atomic_t notify_count;
unsigned long cpkts_len;
+
+	/* IPA / RmNet Bridge support */
+ struct usb_bam_connect_ipa_params ipa_params;
};
#define NR_RMNET_PORTS 3
@@ -149,7 +153,7 @@
/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
- .bLength = sizeof rmnet_ss_notify_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
@@ -168,7 +172,7 @@
};
static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
- .bLength = sizeof rmnet_ss_in_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
@@ -185,7 +189,7 @@
};
static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
- .bLength = sizeof rmnet_ss_out_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
@@ -430,7 +434,17 @@
case USB_GADGET_XPORT_BAM:
case USB_GADGET_XPORT_BAM2BAM:
ret = gbam_connect(&dev->port, port_num,
- dxport, port_num);
+ dxport, port_num, NULL);
+ if (ret) {
+ pr_err("%s: gbam_connect failed: err:%d\n",
+ __func__, ret);
+ gsmd_ctrl_disconnect(&dev->port, port_num);
+ return ret;
+ }
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ ret = gbam_connect(&dev->port, port_num,
+ dxport, port_num, &(dev->ipa_params));
if (ret) {
pr_err("%s: gbam_connect failed: err:%d\n",
__func__, ret);
@@ -500,7 +514,11 @@
switch (dxport) {
case USB_GADGET_XPORT_BAM:
case USB_GADGET_XPORT_BAM2BAM:
- gbam_disconnect(&dev->port, port_num, dxport);
+ gbam_disconnect(&dev->port, port_num, dxport, NULL);
+ break;
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
+ gbam_disconnect(&dev->port, port_num, dxport,
+ &(dev->ipa_params));
break;
case USB_GADGET_XPORT_HSIC:
ghsic_data_disconnect(&dev->port, port_num);
@@ -551,6 +569,7 @@
case USB_GADGET_XPORT_BAM:
break;
case USB_GADGET_XPORT_BAM2BAM:
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
gbam_suspend(&dev->port, port_num, dxport);
break;
case USB_GADGET_XPORT_HSIC:
@@ -580,6 +599,7 @@
case USB_GADGET_XPORT_BAM:
break;
case USB_GADGET_XPORT_BAM2BAM:
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
gbam_resume(&dev->port, port_num, dxport);
break;
case USB_GADGET_XPORT_HSIC:
@@ -1235,6 +1255,7 @@
no_data_bam_ports++;
break;
case USB_GADGET_XPORT_BAM2BAM:
+ case USB_GADGET_XPORT_BAM2BAM_IPA:
rmnet_port->data_xport_num = no_data_bam2bam_ports;
no_data_bam2bam_ports++;
break;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 43347b3..74dba07 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -246,7 +246,7 @@
#ifdef CONFIG_MODEM_SUPPORT
static struct usb_endpoint_descriptor gser_ss_notify_desc = {
- .bLength = sizeof gser_ss_notify_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index f092329..7f3713f 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -101,6 +101,8 @@
u32 src_pipe_idx;
u32 dst_pipe_idx;
u8 connection_idx;
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params *ipa_params;
/* stats */
unsigned int pending_with_bam;
@@ -640,6 +642,21 @@
clear_bit(BAM_CH_OPENED, &d->flags);
}
+static void gbam2bam_disconnect_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w);
+ struct bam_ch_info *d = &port->data_ch;
+ int ret;
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ ret = usb_bam_disconnect_ipa(d->connection_idx, d->ipa_params);
+ if (ret)
+ pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
+ __func__, ret);
+ rmnet_bridge_disconnect();
+ }
+}
+
static void gbam_connect_work(struct work_struct *w)
{
struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
@@ -680,12 +697,38 @@
u32 sps_params;
int ret;
- ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
- &d->dst_pipe_idx);
- if (ret) {
- pr_err("%s: usb_bam_connect failed: err:%d\n",
- __func__, ret);
- return;
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
+ ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
+ &d->dst_pipe_idx);
+ if (ret) {
+ pr_err("%s: usb_bam_connect failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ } else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params->client = IPA_CLIENT_USB_CONS;
+ d->ipa_params->dir = PEER_PERIPHERAL_TO_USB;
+ ret = usb_bam_connect_ipa(d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
+
+ d->ipa_params->client = IPA_CLIENT_USB_PROD;
+ d->ipa_params->dir = USB_TO_PEER_PERIPHERAL;
+ /* Currently only DMA mode is supported */
+ d->ipa_params->ipa_ep_cfg.mode.mode = IPA_DMA;
+ d->ipa_params->ipa_ep_cfg.mode.dst =
+ IPA_CLIENT_A2_TETHERED_CONS;
+ ret = usb_bam_connect_ipa(d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ rmnet_bridge_connect(d->ipa_params->prod_clnt_hdl,
+ d->ipa_params->cons_clnt_hdl, 0);
}
d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
@@ -873,6 +916,7 @@
spin_lock_init(&port->port_lock_dl);
INIT_WORK(&port->connect_w, gbam2bam_connect_work);
+ INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
/* data ch */
d = &port->data_ch;
@@ -993,7 +1037,8 @@
static void gam_debugfs_init(void) { }
#endif
-void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans,
+ struct usb_bam_connect_ipa_params *ipa_params)
{
struct gbam_port *port;
unsigned long flags;
@@ -1008,7 +1053,8 @@
return;
}
- if (trans == USB_GADGET_XPORT_BAM2BAM &&
+ if ((trans == USB_GADGET_XPORT_BAM2BAM ||
+ trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
port_num >= n_bam2bam_ports) {
pr_err("%s: invalid bam2bam portno#%d\n",
__func__, port_num);
@@ -1044,12 +1090,14 @@
gr->in->driver_data = NULL;
gr->out->driver_data = NULL;
- if (trans == USB_GADGET_XPORT_BAM)
+ if (trans == USB_GADGET_XPORT_BAM ||
+ trans == USB_GADGET_XPORT_BAM2BAM_IPA)
queue_work(gbam_wq, &port->disconnect_w);
}
int gbam_connect(struct grmnet *gr, u8 port_num,
- enum transport_type trans, u8 connection_idx)
+ enum transport_type trans, u8 connection_idx,
+ struct usb_bam_connect_ipa_params *ipa_params)
{
struct gbam_port *port;
struct bam_ch_info *d;
@@ -1063,7 +1111,9 @@
return -ENODEV;
}
- if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
+ if ((trans == USB_GADGET_XPORT_BAM2BAM ||
+ trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ && port_num >= n_bam2bam_ports) {
pr_err("%s: invalid portno#%d\n", __func__, port_num);
return -ENODEV;
}
@@ -1115,8 +1165,15 @@
if (trans == USB_GADGET_XPORT_BAM2BAM) {
port->gr = gr;
d->connection_idx = connection_idx;
+ } else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params = ipa_params;
+ port->gr = gr;
+ d->ipa_params->src_pipe = &(d->src_pipe_idx);
+ d->ipa_params->dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params->idx = connection_idx;
}
+ d->trans = trans;
queue_work(gbam_wq, &port->connect_w);
return 0;
@@ -1195,7 +1252,8 @@
struct gbam_port *port;
struct bam_ch_info *d;
- if (trans != USB_GADGET_XPORT_BAM2BAM)
+ if (trans != USB_GADGET_XPORT_BAM2BAM &&
+ trans != USB_GADGET_XPORT_BAM2BAM_IPA)
return;
port = bam2bam_ports[port_num];
@@ -1211,7 +1269,8 @@
struct gbam_port *port;
struct bam_ch_info *d;
- if (trans != USB_GADGET_XPORT_BAM2BAM)
+ if (trans != USB_GADGET_XPORT_BAM2BAM &&
+ trans != USB_GADGET_XPORT_BAM2BAM_IPA)
return;
port = bam2bam_ports[port_num];
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index 4931c1e..bba2ca6 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -332,6 +332,7 @@
net_dev = dev_get_by_name(&init_net, netname);
if (net_dev) {
+ dev_put(net_dev);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
@@ -355,6 +356,10 @@
/* Extract the eth_qc_dev from the net device */
net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ dev_put(net_dev);
dev = netdev_priv(net_dev);
if (!dev)
@@ -400,6 +405,10 @@
/* Extract the eth_qc_dev from the net device */
net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return;
+
+ dev_put(net_dev);
dev = netdev_priv(net_dev);
if (!dev)
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
index 0f7c4fb..a3d42fa 100644
--- a/drivers/usb/gadget/u_rmnet.h
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -48,8 +48,10 @@
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port);
int gbam_connect(struct grmnet *gr, u8 port_num,
- enum transport_type trans, u8 connection_idx);
-void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans);
+ enum transport_type trans, u8 connection_idx,
+ struct usb_bam_connect_ipa_params *ipa_params);
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans,
+ struct usb_bam_connect_ipa_params *ipa_params);
void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans);
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans);
int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index fff9465..1a75bd7 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -337,7 +337,7 @@
/* caller has locked the root hub, and should reset/reinit on error */
-static int ehci_bus_resume (struct usb_hcd *hcd)
+static int __maybe_unused ehci_bus_resume(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 2d69a98..7d12598 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -76,6 +76,7 @@
struct clk *phy_clk;
struct clk *cal_clk;
struct regulator *hsic_vddcx;
+ struct regulator *hsic_gdsc;
bool async_int;
atomic_t in_lpm;
struct wake_lock wlock;
@@ -103,6 +104,8 @@
struct msm_hsic_hcd *__mehci;
static bool debug_bus_voting_enabled = true;
+static u64 ehci_msm_hsic_dma_mask = DMA_BIT_MASK(32);
+
static unsigned int enable_payload_log = 1;
module_param(enable_payload_log, uint, S_IRUGO | S_IWUSR);
@@ -393,6 +396,35 @@
}
+/* Global Distributed Switch Controller (GDSC) init */
+static int msm_hsic_init_gdsc(struct msm_hsic_hcd *mehci, int init)
+{
+ int ret = 0;
+
+ if (IS_ERR(mehci->hsic_gdsc))
+ return 0;
+
+ if (!mehci->hsic_gdsc) {
+ mehci->hsic_gdsc = devm_regulator_get(mehci->dev,
+ "HSIC_GDSC");
+ if (IS_ERR(mehci->hsic_gdsc))
+ return 0;
+ }
+
+ if (init) {
+ ret = regulator_enable(mehci->hsic_gdsc);
+ if (ret) {
+ dev_err(mehci->dev, "unable to enable hsic gdsc\n");
+ return ret;
+ }
+ } else {
+ regulator_disable(mehci->hsic_gdsc);
+ }
+
+ return 0;
+
+}
+
static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -563,18 +595,22 @@
#define HSIC_PAD_CALIBRATION 0xA8
#define HSIC_GPIO_PAD_VAL 0x0A0AAA10
#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
-static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+
+static void msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
- int ret;
- struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
msm_hsic_clk_reset(mehci);
/* select ulpi phy */
writel_relaxed(0x80000000, USB_PORTSC);
-
mb();
+}
+
+static int msm_hsic_start(struct msm_hsic_hcd *mehci)
+{
+ struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+ int ret;
/* HSIC init sequence when HSIC signals (Strobe/Data) are
routed via GPIOs */
@@ -635,6 +671,15 @@
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
#ifdef CONFIG_PM_SLEEP
+static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+{
+ /* reset HSIC phy */
+ msm_hsic_phy_reset(mehci);
+
+	/* HSIC init procedure (calibration) */
+ return msm_hsic_start(mehci);
+}
+
static int msm_hsic_suspend(struct msm_hsic_hcd *mehci)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -1534,6 +1579,11 @@
dev_dbg(&pdev->dev, "ehci_msm-hsic probe\n");
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &ehci_msm_hsic_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
/* After parent device's probe is executed, it will be put in suspend
* mode. When child device's probe is called, driver core is not
* resuming parent device due to which parent will be in suspend even
@@ -1588,6 +1638,13 @@
if (pdata)
mehci->ehci.log2_irq_thresh = pdata->log2_irq_thresh;
+ ret = msm_hsic_init_gdsc(mehci, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize GDSC\n");
+ ret = -ENODEV;
+ goto put_hcd;
+ }
+
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ,
"peripheral_status_irq");
@@ -1616,11 +1673,8 @@
init_completion(&mehci->rt_completion);
init_completion(&mehci->gpt0_completion);
- ret = msm_hsic_reset(mehci);
- if (ret) {
- dev_err(&pdev->dev, "unable to initialize PHY\n");
- goto deinit_vddcx;
- }
+
+ msm_hsic_phy_reset(mehci);
ehci_wq = create_singlethread_workqueue("ehci_wq");
if (!ehci_wq) {
@@ -1634,7 +1688,13 @@
ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (ret) {
dev_err(&pdev->dev, "unable to register HCD\n");
- goto unconfig_gpio;
+ goto destroy_wq;
+ }
+
+ ret = msm_hsic_start(mehci);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize PHY\n");
+ goto destroy_wq;
}
device_init_wakeup(&pdev->dev, 1);
@@ -1710,11 +1770,11 @@
return 0;
-unconfig_gpio:
+destroy_wq:
destroy_workqueue(ehci_wq);
- msm_hsic_config_gpios(mehci, 0);
deinit_vddcx:
msm_hsic_init_vddcx(mehci, 0);
+ msm_hsic_init_gdsc(mehci, 0);
deinit_clocks:
msm_hsic_init_clocks(mehci, 0);
unmap:
@@ -1763,6 +1823,7 @@
usb_remove_hcd(hcd);
msm_hsic_config_gpios(mehci, 0);
msm_hsic_init_vddcx(mehci, 0);
+ msm_hsic_init_gdsc(mehci, 0);
msm_hsic_init_clocks(mehci, 0);
wake_lock_destroy(&mehci->wlock);
@@ -1825,7 +1886,8 @@
* when remote wakeup is received or interface driver
* start I/O.
*/
- if (!atomic_read(&mehci->pm_usage_cnt))
+ if (!atomic_read(&mehci->pm_usage_cnt) &&
+ pm_runtime_suspended(dev))
return 0;
ret = msm_hsic_resume(mehci);
@@ -1881,7 +1943,11 @@
msm_hsic_runtime_idle)
};
#endif
-
+static const struct of_device_id hsic_host_dt_match[] = {
+ { .compatible = "qcom,hsic-host",
+ },
+ {}
+};
static struct platform_driver ehci_msm_hsic_driver = {
.probe = ehci_hsic_msm_probe,
.remove = __devexit_p(ehci_hsic_msm_remove),
@@ -1890,5 +1956,6 @@
#ifdef CONFIG_PM
.pm = &msm_hsic_dev_pm_ops,
#endif
+ .of_match_table = hsic_host_dt_match,
},
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3d9422f..38a3c15 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1277,7 +1277,7 @@
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
- struct usb_hcd *hcd;
+ struct usb_hcd *hcd = NULL;
u32 port_id;
u32 temp, temp1;
int max_ports;
@@ -1331,6 +1331,8 @@
*/
/* Find the right roothub. */
hcd = xhci_to_hcd(xhci);
+ if (!hcd)
+ goto cleanup;
if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
hcd = xhci->shared_hcd;
bus_state = &xhci->bus_state[hcd_index(hcd)];
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 23a9499..c6fe765 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -1582,6 +1582,26 @@
return 0;
}
+static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
+{
+ unsigned long flags;
+ int id;
+
+ if (!motg->pdata->pmic_id_irq)
+		return true;
+
+ local_irq_save(flags);
+ id = irq_read_line(motg->pdata->pmic_id_irq);
+ local_irq_restore(flags);
+
+ /*
+ * If we can not read ID line state for some reason, treat
+ * it as float. This would prevent MHL discovery and kicking
+ * host mode unnecessarily.
+ */
+ return !!id;
+}
+
static int msm_otg_mhl_register_callback(struct msm_otg *motg,
void (*callback)(int on))
{
@@ -1664,14 +1684,11 @@
static bool msm_chg_mhl_detect(struct msm_otg *motg)
{
bool ret, id;
- unsigned long flags;
if (!motg->mhl_enabled)
return false;
- local_irq_save(flags);
- id = irq_read_line(motg->pdata->pmic_id_irq);
- local_irq_restore(flags);
+ id = msm_otg_read_pmic_id_state(motg);
if (id)
return false;
@@ -2299,13 +2316,10 @@
clear_bit(B_SESS_VLD, &motg->inputs);
} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
if (pdata->pmic_id_irq) {
- unsigned long flags;
- local_irq_save(flags);
- if (irq_read_line(pdata->pmic_id_irq))
+ if (msm_otg_read_pmic_id_state(motg))
set_bit(ID, &motg->inputs);
else
clear_bit(ID, &motg->inputs);
- local_irq_restore(flags);
}
/*
* VBUS initial state is reported after PMIC
@@ -2453,6 +2467,18 @@
motg->chg_type = USB_INVALID_CHARGER;
msm_otg_notify_charger(motg, 0);
msm_otg_reset(otg->phy);
+ /*
+ * There is a small window where ID interrupt
+ * is not monitored during ID detection circuit
+ * switch from ACA to PMIC. Check ID state
+ * before entering into low power mode.
+ */
+ if (!msm_otg_read_pmic_id_state(motg)) {
+ pr_debug("process missed ID intr\n");
+ clear_bit(ID, &motg->inputs);
+ work = 1;
+ break;
+ }
pm_runtime_put_noidle(otg->phy->dev);
/*
* Only if autosuspend was enabled in probe, it will be
@@ -3124,10 +3150,8 @@
struct msm_otg *motg = container_of(w, struct msm_otg,
pmic_id_status_work.work);
int work = 0;
- unsigned long flags;
- local_irq_save(flags);
- if (irq_read_line(motg->pdata->pmic_id_irq)) {
+ if (msm_otg_read_pmic_id_state(motg)) {
if (!test_and_set_bit(ID, &motg->inputs)) {
pr_debug("PMIC: ID set\n");
work = 1;
@@ -3146,7 +3170,6 @@
else
queue_work(system_nrt_wq, &motg->sm_work);
}
- local_irq_restore(flags);
}
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index c6ffaf2..0411baa 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1407,30 +1407,37 @@
struct hdmi_disp_mode_list_type *disp_mode_list,
uint32 video_format)
{
- const struct hdmi_disp_mode_timing_type *timing =
- hdmi_common_get_supported_mode(video_format);
- boolean supported = timing != NULL;
+ const struct hdmi_disp_mode_timing_type *timing;
+ boolean supported = false;
+ boolean mhl_supported = true;
if (video_format >= HDMI_VFRMT_MAX)
return;
+ timing = hdmi_common_get_supported_mode(video_format);
+ supported = timing != NULL;
DEV_DBG("EDID: format: %d [%s], %s\n",
video_format, video_format_2string(video_format),
supported ? "Supported" : "Not-Supported");
- if (supported) {
- if (mhl_is_enabled()) {
- const struct hdmi_disp_mode_timing_type *mhl_timing =
- hdmi_mhl_get_supported_mode(video_format);
- boolean mhl_supported = mhl_timing != NULL;
- DEV_DBG("EDID: format: %d [%s], %s by MHL\n",
+
+ if (mhl_is_enabled()) {
+ const struct hdmi_disp_mode_timing_type *mhl_timing =
+ hdmi_mhl_get_supported_mode(video_format);
+ mhl_supported = mhl_timing != NULL;
+ DEV_DBG("EDID: format: %d [%s], %s by MHL\n",
video_format, video_format_2string(video_format),
- mhl_supported ? "Supported" : "Not-Supported");
- if (mhl_supported)
- disp_mode_list->disp_mode_list[
+ mhl_supported ? "Supported" : "Not-Supported");
+ }
+
+ if (supported && mhl_supported) {
+ disp_mode_list->disp_mode_list[
disp_mode_list->num_of_elements++] = video_format;
- } else
- disp_mode_list->disp_mode_list[
- disp_mode_list->num_of_elements++] = video_format;
+ if (video_format == external_common_state->video_resolution) {
+ DEV_DBG("%s: Default resolution %d [%s] supported\n",
+ __func__, video_format,
+ video_format_2string(video_format));
+ external_common_state->default_res_supported = true;
+ }
}
}
@@ -1866,6 +1873,7 @@
memset(&external_common_state->disp_mode_list, 0,
sizeof(external_common_state->disp_mode_list));
memset(edid_buf, 0, sizeof(edid_buf));
+ external_common_state->default_res_supported = false;
status = hdmi_common_read_edid_block(0, edid_buf);
if (status || !check_edid_header(edid_buf)) {
diff --git a/drivers/video/msm/external_common.h b/drivers/video/msm/external_common.h
index 43a8794..70a99ee 100644
--- a/drivers/video/msm/external_common.h
+++ b/drivers/video/msm/external_common.h
@@ -210,8 +210,10 @@
boolean hpd_state;
struct kobject *uevent_kobj;
uint32 video_resolution;
+ boolean default_res_supported;
struct device *dev;
struct switch_dev sdev;
+ struct switch_dev audio_sdev;
#ifdef CONFIG_FB_MSM_HDMI_3D
boolean format_3d;
void (*switch_3d)(boolean on);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 7a92645..516c92c 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -57,6 +57,22 @@
#define HDCP_DDC_CTRL_1 0x0124
#define HDMI_DDC_CTRL 0x020C
+#define HPD_DISCONNECT_POLARITY 0
+#define HPD_CONNECT_POLARITY 1
+
+#define SWITCH_SET_HDMI_AUDIO(d, force) \
+ do {\
+ if (!hdmi_msm_is_dvi_mode() &&\
+ ((force) ||\
+ (external_common_state->audio_sdev.state != (d)))) {\
+ switch_set_state(&external_common_state->audio_sdev,\
+ (d));\
+ DEV_INFO("%s: hdmi_audio state switched to %d\n",\
+ __func__,\
+ external_common_state->audio_sdev.state);\
+ } \
+ } while (0)
+
struct workqueue_struct *hdmi_work_queue;
struct hdmi_msm_state_type *hdmi_msm_state;
@@ -74,6 +90,7 @@
static int hdmi_msm_audio_off(void);
static int hdmi_msm_read_edid(void);
static void hdmi_msm_hpd_off(void);
+static boolean hdmi_msm_is_dvi_mode(void);
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
@@ -768,8 +785,8 @@
/* Build EDID table */
hdmi_msm_read_edid();
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+ external_common_state->sdev.state);
DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE);
@@ -783,8 +800,8 @@
}
} else {
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("hdmi: Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: hdmi state switch to %d\n", __func__,
+ external_common_state->sdev.state);
DEV_INFO("hdmi: HDMI HPD: sense DISCONNECTED: send OFFLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
@@ -793,66 +810,13 @@
static void hdmi_msm_hpd_state_work(struct work_struct *work)
{
- boolean hpd_state;
-
if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
!MSM_HDMI_BASE) {
DEV_ERR("hdmi: %s: ignored, probe failed\n", __func__);
return;
}
- mutex_lock(&hdmi_msm_state_mutex);
- DEV_DBG("%s: Handling HPD event in the workqueue\n", __func__);
-
- if (!hdmi_msm_state->hpd_cable_chg_detected) {
- /* The work item got called from outside the ISR */
- mutex_unlock(&hdmi_msm_state_mutex);
- if (external_common_state->hpd_state) {
- if (!external_common_state->
- disp_mode_list.num_of_elements)
- hdmi_msm_read_edid();
- }
- } else {
- hdmi_msm_state->hpd_cable_chg_detected = FALSE;
- mutex_unlock(&hdmi_msm_state_mutex);
- mutex_lock(&external_common_state_hpd_mutex);
- /*
- * Handle the connect event only if the cable is
- * still connected. This check is needed for the case
- * where we get a connect event followed by a disconnect
- * event in quick succession. In this case, there is no need
- * to process the connect event.
- */
- if ((external_common_state->hpd_state) &&
- !((HDMI_INP(0x0250) & 0x2) >> 1)) {
- external_common_state->hpd_state = 0;
- hdmi_msm_state->hpd_state_in_isr = 0;
- mutex_unlock(&external_common_state_hpd_mutex);
- DEV_DBG("%s: Ignoring HPD connect event\n", __func__);
- return;
- }
- mutex_unlock(&external_common_state_hpd_mutex);
- hdmi_msm_send_event(external_common_state->hpd_state);
- }
-
- /*
- * Wait for a short time before checking for
- * any changes in the connection status
- */
- udelay(100);
-
- mutex_lock(&external_common_state_hpd_mutex);
- /* HPD_INT_STATUS[0x0250] */
- hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
-
- if (external_common_state->hpd_state != hpd_state) {
- external_common_state->hpd_state = hpd_state;
- hdmi_msm_state->hpd_state_in_isr = hpd_state;
- mutex_unlock(&external_common_state_hpd_mutex);
- hdmi_msm_send_event(hpd_state);
- } else {
- mutex_unlock(&external_common_state_hpd_mutex);
- }
+ hdmi_msm_send_event(external_common_state->hpd_state);
}
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
@@ -965,9 +929,7 @@
DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
link_status);
if (hdmi_msm_state->full_auth_done) {
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ SWITCH_SET_HDMI_AUDIO(0, 0);
envp[0] = "HDCP_STATE=FAIL";
envp[1] = NULL;
@@ -1051,55 +1013,18 @@
/* HDMI_HPD_INT_CTRL[0x0254] */
hpd_int_ctrl = HDMI_INP_ND(0x0254);
if ((hpd_int_ctrl & (1 << 2)) && (hpd_int_status & (1 << 0))) {
- boolean cable_detected = (hpd_int_status & 2) >> 1;
- DEV_DBG("%s: HPD IRQ, Ctrl=%04x, State=%04x\n", __func__,
- hpd_int_ctrl, hpd_int_status);
+ /*
+ * Got HPD interrupt. Ack the interrupt and disable any
+ * further HPD interrupts until we process this interrupt.
+ */
+ HDMI_OUTP(0x0254, ((hpd_int_ctrl | (BIT(0))) & ~BIT(2)));
- /* Ack the interrupt */
- HDMI_OUTP(0x0254, (hpd_int_ctrl | (1 << 0)));
-
- mutex_lock(&external_common_state_hpd_mutex);
- if (hdmi_msm_state->hpd_state_in_isr == cable_detected) {
- DEV_INFO("%s: HPD has the same state. Ignoring\n",
- __func__);
- mutex_unlock(&external_common_state_hpd_mutex);
- } else {
- if (!mod_timer(&hdmi_msm_state->hpd_state_timer,
- jiffies + HZ/2)) {
- hdmi_msm_state->hpd_state_in_isr =
- cable_detected;
- hdmi_msm_state->hpd_cable_chg_detected = TRUE;
- DEV_DBG("%s: Scheduled work to handle HPD %s\n",
- __func__,
- cable_detected ? "connect"
- : "disconnect");
- }
-
- mutex_unlock(&external_common_state_hpd_mutex);
- /*
- * HDCP Compliance 1A-01:
- * The Quantum Data Box 882 triggers two consecutive
- * HPD events very close to each other as a part of this
- * test which can trigger two parallel HDCP auth threads
- * if HDCP authentication is going on and we get ISR
- * then stop the authentication , rather than
- * reauthenticating it again
- */
- if (hdmi_msm_state->hdcp_activating &&
- !(hdmi_msm_state->full_auth_done)) {
- DEV_DBG("%s getting hpd while authenticating\n",
- __func__);
- mutex_lock(&hdcp_auth_state_mutex);
- hdmi_msm_state->hpd_during_auth = TRUE;
- mutex_unlock(&hdcp_auth_state_mutex);
- }
- }
-
- /* Set up HPD_CTRL to sense HPD event */
- HDMI_OUTP(0x0254, 4 | (cable_detected ? 0 : 2));
- DEV_DBG("%s: Setting HPD_CTRL=%d\n", __func__,
- HDMI_INP(0x0254));
-
+ external_common_state->hpd_state =
+ (HDMI_INP(0x0250) & BIT(1)) >> 1;
+ DEV_DBG("%s: Queuing work to handle HPD %s event\n", __func__,
+ external_common_state->hpd_state ? "connect" :
+ "disconnect");
+ queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work);
return IRQ_HANDLED;
}
@@ -2468,8 +2393,8 @@
/* 0x0110 HDCP_CTRL
[8] ENCRYPTION_ENABLE
[0] ENABLE */
- /* encryption_enable | enable */
- HDMI_OUTP(0x0110, (1 << 8) | (1 << 0));
+ /* Enable HDCP. Encryption should be enabled after reading R0 */
+ HDMI_OUTP(0x0110, BIT(0));
/*
* Check to see if a HDCP DDC Failure is indicated in
@@ -2664,6 +2589,9 @@
goto error;
}
+ /* Enable HDCP Encryption */
+ HDMI_OUTP(0x0110, BIT(0) | BIT(8));
+
DEV_INFO("HDCP: authentication part I, successful\n");
is_part1_done = FALSE;
return 0;
@@ -3023,17 +2951,15 @@
mutex_unlock(&hdmi_msm_state_mutex);
mutex_lock(&hdcp_auth_state_mutex);
- /*
- * Initialize this to zero here to make
- * sure HPD has not happened yet
- */
- hdmi_msm_state->hpd_during_auth = FALSE;
/* This flag prevents other threads from re-authenticating
* after we've just authenticated (i.e., finished part3)
* We probably need to protect this in a mutex lock */
hdmi_msm_state->full_auth_done = FALSE;
mutex_unlock(&hdcp_auth_state_mutex);
+ /* Disable HDCP before we start part1 */
+ HDMI_OUTP(0x0110, 0x0);
+
/* PART I Authentication*/
ret = hdcp_authentication_part1();
if (ret)
@@ -3082,17 +3008,13 @@
envp[1] = NULL;
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
+
+ SWITCH_SET_HDMI_AUDIO(1, 0);
}
- switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
return;
error:
- mutex_lock(&hdmi_msm_state_mutex);
- hdmi_msm_state->hdcp_activating = FALSE;
- mutex_unlock(&hdmi_msm_state_mutex);
if (hdmi_msm_state->hpd_during_auth) {
DEV_WARN("Calling Deauthentication: HPD occured during "
"authentication from [%s]\n", __func__);
@@ -3106,9 +3028,9 @@
queue_work(hdmi_work_queue,
&hdmi_msm_state->hdcp_reauth_work);
}
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ mutex_lock(&hdmi_msm_state_mutex);
+ hdmi_msm_state->hdcp_activating = FALSE;
+ mutex_unlock(&hdmi_msm_state_mutex);
}
static void hdmi_msm_video_setup(int video_format)
@@ -3663,28 +3585,24 @@
static int hdmi_msm_audio_off(void)
{
- uint32 audio_pkt_ctrl, audio_cfg;
- /* Number of wait iterations */
- int i = 10;
- audio_pkt_ctrl = HDMI_INP_ND(0x0020);
- audio_cfg = HDMI_INP_ND(0x01D0);
+ uint32 audio_cfg;
+ int i, timeout_val = 50;
- /* Checking BIT[0] of AUDIO PACKET CONTROL and */
- /* AUDIO CONFIGURATION register */
- while (((audio_pkt_ctrl & 0x00000001) || (audio_cfg & 0x00000001))
- && (i--)) {
- audio_pkt_ctrl = HDMI_INP_ND(0x0020);
- audio_cfg = HDMI_INP_ND(0x01D0);
- DEV_DBG("%d times :: HDMI AUDIO PACKET is %08x and "
- "AUDIO CFG is %08x", i, audio_pkt_ctrl, audio_cfg);
- msleep(100);
- if (!i) {
- DEV_ERR("%s:failed to set BIT[0] AUDIO PACKET"
- "CONTROL or AUDIO CONFIGURATION REGISTER\n",
- __func__);
- return -ETIMEDOUT;
+ for (i = 0; (i < timeout_val) &&
+ ((audio_cfg = HDMI_INP_ND(0x01D0)) & BIT(0)); i++) {
+ 		DEV_DBG("%s: %d times: AUDIO CFG is %08x\n", __func__,
+ i+1, audio_cfg);
+ if (!((i+1) % 10)) {
+ DEV_ERR("%s: audio still on after %d sec. try again\n",
+ __func__, (i+1)/10);
+ SWITCH_SET_HDMI_AUDIO(0, 1);
}
+ msleep(100);
}
+
+ if (i == timeout_val)
+ DEV_ERR("%s: Error: cannot turn off audio engine\n", __func__);
+
hdmi_msm_audio_info_setup(FALSE, 0, 0, 0, FALSE);
hdmi_msm_audio_acr_setup(FALSE, 0, 0, 0);
DEV_INFO("HDMI Audio: Disabled\n");
@@ -4197,8 +4115,17 @@
hdmi_msm_set_mode(TRUE);
hdmi_msm_video_setup(external_common_state->video_resolution);
- if (!hdmi_msm_is_dvi_mode())
+ if (!hdmi_msm_is_dvi_mode()) {
hdmi_msm_audio_setup();
+
+ /*
+ * Send the audio switch device notification if HDCP is
+ * not enabled. Otherwise, the notification would be
+ * sent after HDCP authentication is successful.
+ */
+ if (!hdmi_msm_state->hdcp_enable)
+ SWITCH_SET_HDMI_AUDIO(1, 0);
+ }
hdmi_msm_avi_info_frame();
#ifdef CONFIG_FB_MSM_HDMI_3D
hdmi_msm_vendor_infoframe_packetsetup();
@@ -4223,36 +4150,6 @@
DEV_INFO("HDMI Core: Initialized\n");
}
-static void hdmi_msm_hpd_state_timer(unsigned long data)
-{
- if (!work_busy(&hdmi_msm_state->hpd_state_work)) {
- /*
- * There is no event currently queued.
- * Only queue the work if this event has not already
- * been processed.
- */
- if (external_common_state->hpd_state !=
- hdmi_msm_state->hpd_state_in_isr) {
- /*
- * There is no need to use any synchronization
- * construct for safeguarding these state vairables
- * here since the only other place these are modified
- * is in the HPD work thread, which is known to be not
- * pending/running.
- */
- external_common_state->hpd_state =
- hdmi_msm_state->hpd_state_in_isr;
- DEV_DBG("%s: Queuing work to handle HPD %s event\n",
- __func__,
- external_common_state->hpd_state ?
- "connect" : "disconnect");
- queue_work(hdmi_work_queue,
- &hdmi_msm_state->hpd_state_work);
- return;
- }
- }
-}
-
static void hdmi_msm_hdcp_timer(unsigned long data)
{
if (!hdmi_msm_state->hdcp_enable) {
@@ -4270,6 +4167,27 @@
}
#endif
+static void hdmi_msm_hpd_polarity_setup(bool polarity, bool trigger)
+{
+ u32 cable_sense;
+ if (polarity)
+ HDMI_OUTP(0x0254, BIT(2) | BIT(1));
+ else
+ HDMI_OUTP(0x0254, BIT(2));
+
+ cable_sense = (HDMI_INP(0x0250) & BIT(1)) >> 1;
+ DEV_DBG("%s: listen=%s, sense=%s\n", __func__,
+ polarity ? "connect" : "disconnect",
+ cable_sense ? "connect" : "disconnect");
+ if (trigger && (cable_sense == polarity)) {
+ u32 reg_val = HDMI_INP(0x0258);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ HDMI_OUTP(0x0258, reg_val & ~BIT(28));
+ HDMI_OUTP(0x0258, reg_val | BIT(28));
+ }
+}
+
static void hdmi_msm_hpd_off(void)
{
int rc = 0;
@@ -4280,7 +4198,6 @@
}
DEV_DBG("%s: (timer, 5V, IRQ off)\n", __func__);
- del_timer(&hdmi_msm_state->hpd_state_timer);
disable_irq(hdmi_msm_state->irq);
/* Disable HPD interrupt */
@@ -4349,27 +4266,22 @@
/* Set up HPD state variables */
mutex_lock(&external_common_state_hpd_mutex);
external_common_state->hpd_state = 0;
- hdmi_msm_state->hpd_state_in_isr = 0;
mutex_unlock(&external_common_state_hpd_mutex);
mutex_lock(&hdmi_msm_state_mutex);
- hdmi_msm_state->hpd_cable_chg_detected = TRUE;
mutex_unlock(&hdmi_msm_state_mutex);
- /* Set up HPD_CTRL to sense HPD event */
- HDMI_OUTP(0x0254, 0x6);
- DEV_DBG("%s: Setting HPD_CTRL=%d\n", __func__,
- HDMI_INP(0x0254));
+ enable_irq(hdmi_msm_state->irq);
hdmi_msm_state->hpd_initialized = TRUE;
- enable_irq(hdmi_msm_state->irq);
-
/* set timeout to 4.1ms (max) for hardware debounce */
hpd_ctrl = HDMI_INP(0x0258) | 0x1FFF;
- /* Toggle HPD circuit to trigger HPD sense */
- HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
- HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
+ /* Turn on HPD HW circuit */
+ HDMI_OUTP(0x0258, hpd_ctrl | BIT(28));
+
+ /* Set up HPD_CTRL to sense HPD event */
+ hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY, true);
}
DEV_DBG("%s: (IRQ, 5V on)\n", __func__);
@@ -4413,35 +4325,54 @@
if (!hdmi_msm_state || !hdmi_msm_state->hdmi_app_clk || !MSM_HDMI_BASE)
return -ENODEV;
+ if (!hdmi_msm_state->hpd_initialized ||
+ !external_common_state->hpd_state) {
+ DEV_DBG("%s: HPD not initialized/cable not conn. Returning\n",
+ __func__);
+ return 0;
+ }
+
DEV_INFO("power: ON (%dx%d %d)\n", mfd->var_xres, mfd->var_yres,
mfd->var_pixclock);
+ /* Only start transmission with supported resolution */
changed = hdmi_common_get_video_format_from_drv_data(mfd);
- hdmi_msm_audio_info_setup(TRUE, 0, 0, 0, FALSE);
+ if (changed || external_common_state->default_res_supported) {
+ hdmi_msm_audio_info_setup(TRUE, 0, 0, 0, FALSE);
+ mutex_lock(&external_common_state_hpd_mutex);
+ hdmi_msm_state->panel_power_on = TRUE;
+ if (external_common_state->hpd_state &&
+ hdmi_msm_is_power_on()) {
+ DEV_DBG("%s: Turning HDMI on\n", __func__);
+ mutex_unlock(&external_common_state_hpd_mutex);
+ hdmi_msm_turn_on();
- mutex_lock(&external_common_state_hpd_mutex);
- hdmi_msm_state->panel_power_on = TRUE;
- if (external_common_state->hpd_state && hdmi_msm_is_power_on()) {
- DEV_DBG("%s: Turning HDMI on\n", __func__);
- mutex_unlock(&external_common_state_hpd_mutex);
- hdmi_msm_turn_on();
-
- if (hdmi_msm_state->hdcp_enable) {
- /* Kick off HDCP Authentication */
- mutex_lock(&hdcp_auth_state_mutex);
- hdmi_msm_state->reauth = FALSE;
- hdmi_msm_state->full_auth_done = FALSE;
- mutex_unlock(&hdcp_auth_state_mutex);
- mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2);
+ if (hdmi_msm_state->hdcp_enable) {
+ /* Kick off HDCP Authentication */
+ mutex_lock(&hdcp_auth_state_mutex);
+ hdmi_msm_state->reauth = FALSE;
+ hdmi_msm_state->full_auth_done = FALSE;
+ mutex_unlock(&hdcp_auth_state_mutex);
+ mod_timer(&hdmi_msm_state->hdcp_timer,
+ jiffies + HZ/2);
+ }
+ } else {
+ mutex_unlock(&external_common_state_hpd_mutex);
}
- } else
- mutex_unlock(&external_common_state_hpd_mutex);
- hdmi_msm_dump_regs("HDMI-ON: ");
+ hdmi_msm_dump_regs("HDMI-ON: ");
+ DEV_INFO("power=%s DVI= %s\n",
+ hdmi_msm_is_power_on() ? "ON" : "OFF" ,
+ hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
+ } else {
+ DEV_ERR("%s: Video fmt %d not supp. Returning\n",
+ __func__,
+ external_common_state->video_resolution);
+ }
- DEV_INFO("power=%s DVI= %s\n",
- hdmi_msm_is_power_on() ? "ON" : "OFF" ,
- hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
+ /* Enable HPD interrupt and listen to disconnect interrupts */
+ hdmi_msm_hpd_polarity_setup(HPD_DISCONNECT_POLARITY,
+ external_common_state->hpd_state);
return 0;
}
@@ -4450,11 +4381,6 @@
char *envp[2];
/* Simulating a HPD event based on MHL event */
- hdmi_msm_state->hpd_cable_chg_detected = FALSE;
- /* QDSP OFF preceding the HPD event notification */
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
if (on) {
hdmi_msm_read_edid();
hdmi_msm_state->reauth = FALSE ;
@@ -4472,8 +4398,8 @@
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: hdmi state switched to %d\n",
+ __func__, external_common_state->sdev.state);
} else {
hdmi_msm_hdcp_enable();
}
@@ -4482,8 +4408,8 @@
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switched to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+ external_common_state->sdev.state);
}
}
EXPORT_SYMBOL(mhl_connect_api);
@@ -4499,21 +4425,48 @@
if (!hdmi_msm_state->hdmi_app_clk)
return -ENODEV;
- mutex_lock(&hdmi_msm_state_mutex);
- if (hdmi_msm_state->hdcp_activating) {
- hdmi_msm_state->panel_power_on = FALSE;
- mutex_unlock(&hdmi_msm_state_mutex);
- DEV_INFO("HDCP: activating, returning\n");
+ if (!hdmi_msm_state->panel_power_on) {
+ DEV_DBG("%s: panel not on. returning\n", __func__);
return 0;
}
- mutex_unlock(&hdmi_msm_state_mutex);
- DEV_INFO("power: OFF (audio off, Reset Core)\n");
- hdmi_msm_audio_off();
- hdcp_deauthenticate();
+ if (hdmi_msm_state->hdcp_enable) {
+ if (hdmi_msm_state->hdcp_activating) {
+ /*
+ * Let the HDCP work know that we got an HPD
+ * disconnect so that it can stop the
+ * reauthentication loop.
+ */
+ mutex_lock(&hdcp_auth_state_mutex);
+ hdmi_msm_state->hpd_during_auth = TRUE;
+ mutex_unlock(&hdcp_auth_state_mutex);
+ }
+
+ /*
+ * Cancel any pending reauth attempts.
+ * If one is ongoing, wait for it to finish
+ */
+ cancel_work_sync(&hdmi_msm_state->hdcp_reauth_work);
+ cancel_work_sync(&hdmi_msm_state->hdcp_work);
+ del_timer_sync(&hdmi_msm_state->hdcp_timer);
+
+ hdcp_deauthenticate();
+ }
+
+ SWITCH_SET_HDMI_AUDIO(0, 0);
+
+ if (!hdmi_msm_is_dvi_mode())
+ hdmi_msm_audio_off();
+
hdmi_msm_powerdown_phy();
hdmi_msm_state->panel_power_on = FALSE;
+ DEV_INFO("power: OFF (audio off)\n");
+
+ /* Enable HPD interrupt and listen to connect interrupts */
+ hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY,
+ !external_common_state->hpd_state);
+
return 0;
}
@@ -4649,13 +4602,6 @@
}
disable_irq(hdmi_msm_state->irq);
- init_timer(&hdmi_msm_state->hpd_state_timer);
- hdmi_msm_state->hpd_state_timer.function =
- hdmi_msm_hpd_state_timer;
- hdmi_msm_state->hpd_state_timer.data = (uint32)NULL;
-
- hdmi_msm_state->hpd_state_timer.expires = 0xffffffffL;
-
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
init_timer(&hdmi_msm_state->cec_read_timer);
hdmi_msm_state->cec_read_timer.function =
@@ -4689,8 +4635,19 @@
external_common_state->sdev.name = "hdmi_as_primary";
else
external_common_state->sdev.name = "hdmi";
- if (switch_dev_register(&external_common_state->sdev) < 0)
+ if (switch_dev_register(&external_common_state->sdev) < 0) {
DEV_ERR("Hdmi switch registration failed\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ external_common_state->audio_sdev.name = "hdmi_audio";
+ if (switch_dev_register(&external_common_state->audio_sdev) < 0) {
+ DEV_ERR("Hdmi audio switch registration failed\n");
+ switch_dev_unregister(&external_common_state->sdev);
+ rc = -ENODEV;
+ goto error;
+ }
/* Set the default video resolution for MHL-enabled display */
if (hdmi_msm_state->is_mhl_enabled) {
@@ -4733,6 +4690,7 @@
/* Unregister hdmi node from switch driver */
switch_dev_unregister(&external_common_state->sdev);
+ switch_dev_unregister(&external_common_state->audio_sdev);
hdmi_msm_hpd_off();
free_irq(hdmi_msm_state->irq, NULL);
@@ -4772,9 +4730,14 @@
if (on) {
rc = hdmi_msm_hpd_on();
} else {
+ external_common_state->hpd_state = 0;
hdmi_msm_hpd_off();
+ SWITCH_SET_HDMI_AUDIO(0, 0);
+
/* Set HDMI switch node to 0 on HPD feature disable */
switch_set_state(&external_common_state->sdev, 0);
+ DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+ external_common_state->sdev.state);
}
return rc;
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 20bd492..17cefdd 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -53,15 +53,12 @@
struct hdmi_msm_state_type {
boolean panel_power_on;
boolean hpd_initialized;
- boolean hpd_state_in_isr;
#ifdef CONFIG_SUSPEND
boolean pm_suspended;
#endif
- boolean hpd_cable_chg_detected;
boolean full_auth_done;
boolean hpd_during_auth;
struct work_struct hpd_state_work;
- struct timer_list hpd_state_timer;
struct completion ddc_sw_done;
bool hdcp_enable;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index fc512c1..a827d6a 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2510,7 +2510,6 @@
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
struct mipi_panel_info *mipi;
#endif
- unsigned int mdp_r = 0;
if ((pdev->id == 0) && (pdev->num_resources > 0)) {
mdp_init_pdev = pdev;
@@ -2532,14 +2531,6 @@
}
mdp_rev = mdp_pdata->mdp_rev;
- if (mdp_rev == MDP_REV_42) {
- mdp_r = inpdw(MDP_BASE + 0x0);
- mdp_r = ((mdp_r & 0x30000) >> 16);
- if (mdp_r == 3) {
- mdp_rev = MDP_REV_43;
- mdp_pdata->mdp_rev = MDP_REV_43;
- }
- }
mdp_iommu_split_domain = mdp_pdata->mdp_iommu_split_domain;
@@ -3127,7 +3118,8 @@
{
mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
- mdp4_dtv_set_black_screen(FALSE);
+ mdp4_solidfill_commit(MDP4_MIXER1);
+ mdp4_dtv_set_black_screen();
#endif
mdp_footswitch_ctrl(FALSE);
}
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 2f4fac1..ee2405e 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -28,6 +28,8 @@
extern char *mmss_cc_base; /* mutimedia sub system clock control */
extern spinlock_t dsi_clk_lock;
extern u32 mdp_max_clk;
+extern u32 dbg_force_ov0_blt;
+extern u32 dbg_force_ov1_blt;
#define MDP4_OVERLAYPROC0_BASE 0x10000
#define MDP4_OVERLAYPROC1_BASE 0x18000
@@ -531,7 +533,7 @@
}
#endif /* CONFIG_FB_MSM_DTV */
-void mdp4_dtv_set_black_screen(bool commit);
+void mdp4_dtv_set_black_screen(void);
int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
@@ -589,10 +591,11 @@
int mdp4_overlay_play_wait(struct fb_info *info,
struct msmfb_overlay_data *req);
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
-int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_overlay_commit(struct fb_info *info);
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
void mdp4_overlay_dma_commit(int mixer);
void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_solidfill_commit(int mixer);
void mdp4_mixer_stage_commit(int mixer);
void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
@@ -988,9 +991,15 @@
{
/* empty */
}
+static inline int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+ int cndx, int wait)
+{
+ return 0;
+}
#else
void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_wfd_init(int cndx);
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd, int cndx, int wait);
#endif
#endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4_dtv.c b/drivers/video/msm/mdp4_dtv.c
index bd0ce2f..4b83224 100644
--- a/drivers/video/msm/mdp4_dtv.c
+++ b/drivers/video/msm/mdp4_dtv.c
@@ -38,6 +38,7 @@
static int dtv_off(struct platform_device *pdev);
static int dtv_on(struct platform_device *pdev);
+static int dtv_off_sub(void);
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -45,6 +46,9 @@
static struct clk *tv_src_clk;
static struct clk *hdmi_clk;
static struct clk *mdp_tv_clk;
+static struct platform_device *dtv_pdev;
+static struct workqueue_struct *dtv_work_queue;
+static struct work_struct dtv_off_work;
static int mdp4_dtv_runtime_suspend(struct device *dev)
@@ -86,8 +90,48 @@
static int dtv_off(struct platform_device *pdev)
{
int ret = 0;
+ struct msm_fb_data_type *mfd = NULL;
- ret = panel_next_off(pdev);
+ if (!pdev) {
+ pr_err("%s: FAILED: invalid arg\n", __func__);
+ return -EINVAL;
+ }
+
+ mfd = platform_get_drvdata(pdev);
+ if (!mfd) {
+ pr_err("%s: FAILED: invalid mfd\n", __func__);
+ return -EINVAL;
+ }
+
+ dtv_pdev = pdev;
+ /*
+ * If it's a suspend operation then handle the device
+ * power down synchronously.
+ * Otherwise, queue work item to handle power down sequence.
+ * This is needed since we need to wait for the audio engine
+ * to shutdown first before we turn off the DTV device.
+ */
+ if (!mfd->suspend.op_suspend) {
+ pr_debug("%s: Queuing work to turn off HDMI core\n", __func__);
+ queue_work(dtv_work_queue, &dtv_off_work);
+ } else {
+ pr_debug("%s: turning off HDMI core\n", __func__);
+ ret = dtv_off_sub();
+ }
+
+ return ret;
+}
+
+static int dtv_off_sub(void)
+{
+ int ret = 0;
+
+ if (!dtv_pdev) {
+ pr_err("%s: FAILED: invalid arg\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = panel_next_off(dtv_pdev);
pr_info("%s\n", __func__);
@@ -112,12 +156,20 @@
return ret;
}
+static void dtv_off_work_func(struct work_struct *work)
+{
+ dtv_off_sub();
+}
+
static int dtv_on(struct platform_device *pdev)
{
int ret = 0;
struct msm_fb_data_type *mfd;
unsigned long panel_pixclock_freq , pm_qos_rate;
+ /* If a power down is already underway, wait for it to finish */
+ flush_work_sync(&dtv_off_work);
+
mfd = platform_get_drvdata(pdev);
panel_pixclock_freq = mfd->fbi->var.pixclock;
@@ -215,6 +267,8 @@
return 0;
}
+ dtv_work_queue = create_singlethread_workqueue("dtv_work");
+ INIT_WORK(&dtv_off_work, dtv_off_work_func);
mfd = platform_get_drvdata(pdev);
if (!mfd)
@@ -302,6 +356,8 @@
static int dtv_remove(struct platform_device *pdev)
{
+ if (dtv_work_queue)
+ destroy_workqueue(dtv_work_queue);
#ifdef CONFIG_MSM_BUS_SCALING
if (dtv_pdata && dtv_pdata->bus_scale_table &&
dtv_bus_scale_handle > 0)
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 11952f3..05344fc 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -48,6 +48,7 @@
struct mdp4_overlay_pipe *stage[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
+ struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -121,6 +122,7 @@
static struct ion_client *display_iclient;
+static void mdp4_overlay_bg_solidfill(struct blend_cfg *blend);
/*
* mdp4_overlay_iommu_unmap_freelist()
@@ -778,6 +780,7 @@
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
+ case MDP_Y_CBCR_H1V2:
*luma_off = pipe->src_x +
(pipe->src_y * pipe->srcp0_ystride);
*chroma_off = pipe->src_x +
@@ -987,6 +990,7 @@
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
+ case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_TILE:
@@ -1171,6 +1175,7 @@
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
+ case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H1V1:
@@ -1211,6 +1216,10 @@
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
pipe->chroma_sample = MDP4_CHROMA_H1V2;
+ } else if (pipe->src_format == MDP_Y_CBCR_H1V2) {
+ pipe->element1 = C2_R_Cr;
+ pipe->element0 = C1_B_Cb;
+ pipe->chroma_sample = MDP4_CHROMA_H1V2;
} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
@@ -1372,6 +1381,7 @@
case MDP_Y_CRCB_H2V2:
case MDP_Y_CRCB_H2V1:
case MDP_Y_CRCB_H1V2:
+ case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H1V1:
case MDP_Y_CBCR_H1V1:
case MDP_YCRCB_H1V1:
@@ -1604,6 +1614,35 @@
return cnt;
}
+void mdp4_solidfill_commit(int mixer)
+{
+ struct blend_cfg bcfg;
+ struct mdp4_overlay_pipe *pp = NULL;
+ int i = 0;
+
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+ pp = &ctrl->sf_plist[mixer][i];
+ if (pp->pipe_ndx && pp->solid_fill) {
+ bcfg.solidfill = 1;
+ bcfg.solidfill_pipe = pp;
+ mdp4_overlay_bg_solidfill(&bcfg);
+ mdp4_overlay_reg_flush(pp, 1);
+ mdp4_mixer_stage_up(pp, 0);
+ }
+ }
+ mdp4_mixer_stage_commit(MDP4_MIXER1);
+
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+ pp = &ctrl->sf_plist[mixer][i];
+ if (pp->pipe_ndx && pp->solid_fill) {
+ mdp4_overlay_reg_flush(pp, 1);
+ mdp4_mixer_stage_down(pp, 0);
+ pp->solid_fill = 0;
+ }
+ }
+ mdp4_mixer_stage_commit(MDP4_MIXER1);
+}
+
void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
@@ -1701,7 +1740,8 @@
ctrl->stage[mixer][i] = NULL; /* clear it */
}
- if (commit || (mixer > 0 && !hdmi_prim_display))
+ if (commit || ((mixer == 1) && !hdmi_prim_display) ||
+ (mixer == 2))
mdp4_mixer_stage_commit(mixer);
}
/*
@@ -1945,7 +1985,7 @@
struct mdp4_overlay_pipe *d_pipe;
struct mdp4_overlay_pipe *s_pipe;
struct blend_cfg *blend;
- int i, off, alpha_drop = 0;
+ int i, off, alpha_drop;
int d_alpha, s_alpha;
unsigned char *overlay_base;
uint32 c0, c1, c2, base_premulti;
@@ -1971,8 +2011,10 @@
d_alpha = 0;
continue;
}
+ alpha_drop = 0; /* per stage */
/* alpha channel is lost on VG pipe when using QSEED or M/N */
if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
+ s_pipe->alpha_enable &&
((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
!(s_pipe->op_mode & (MDP4_OP_SCALEX_PIXEL_RPT |
@@ -2085,7 +2127,9 @@
outpdw(overlay_base + off + 0x108, blend->fg_alpha);
outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
- if (mdp_rev >= MDP_REV_42)
+ if (mdp_rev >= MDP_REV_42 ||
+ ctrl->panel_mode & MDP4_PANEL_MDDI ||
+ ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
outpdw(overlay_base + off + 0x104, blend->op);
outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
@@ -2851,8 +2895,9 @@
perf_req->mdp_bw);
perf_cur->mdp_bw = perf_req->mdp_bw;
}
- if (mfd->panel_info.pdest == DISPLAY_1 &&
- perf_req->use_ov0_blt && !perf_cur->use_ov0_blt) {
+ if ((mfd->panel_info.pdest == DISPLAY_1 &&
+ perf_req->use_ov0_blt && !perf_cur->use_ov0_blt) ||
+ dbg_force_ov0_blt) {
if (mfd->panel_info.type == LCDC_PANEL ||
mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_start(mfd);
@@ -2862,17 +2907,18 @@
mdp4_dsi_cmd_blt_start(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
mdp4_mddi_blt_start(mfd);
- pr_info("%s mixer0 start blt [%d] from %d to %d.\n",
+ pr_debug("%s mixer0 start blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov0_blt,
perf_req->use_ov0_blt);
perf_cur->use_ov0_blt = perf_req->use_ov0_blt;
}
- if (mfd->panel_info.pdest == DISPLAY_2 &&
- perf_req->use_ov1_blt && !perf_cur->use_ov1_blt) {
+ if ((mfd->panel_info.pdest == DISPLAY_2 &&
+ perf_req->use_ov1_blt && !perf_cur->use_ov1_blt) ||
+ dbg_force_ov1_blt) {
mdp4_dtv_overlay_blt_start(mfd);
- pr_info("%s mixer1 start blt [%d] from %d to %d.\n",
+ pr_debug("%s mixer1 start blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov1_blt,
@@ -2901,8 +2947,9 @@
perf_req->mdp_bw);
perf_cur->mdp_bw = perf_req->mdp_bw;
}
- if (mfd->panel_info.pdest == DISPLAY_1 &&
- !perf_req->use_ov0_blt && perf_cur->use_ov0_blt) {
+ if ((mfd->panel_info.pdest == DISPLAY_1 &&
+ !perf_req->use_ov0_blt && perf_cur->use_ov0_blt) ||
+ dbg_force_ov0_blt) {
if (mfd->panel_info.type == LCDC_PANEL ||
mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_stop(mfd);
@@ -2912,17 +2959,18 @@
mdp4_dsi_cmd_blt_stop(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
mdp4_mddi_blt_stop(mfd);
- pr_info("%s mixer0 stop blt [%d] from %d to %d.\n",
+ pr_debug("%s mixer0 stop blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov0_blt,
perf_req->use_ov0_blt);
perf_cur->use_ov0_blt = perf_req->use_ov0_blt;
}
- if (mfd->panel_info.pdest == DISPLAY_2 &&
- !perf_req->use_ov1_blt && perf_cur->use_ov1_blt) {
+ if ((mfd->panel_info.pdest == DISPLAY_2 &&
+ !perf_req->use_ov1_blt && perf_cur->use_ov1_blt) ||
+ dbg_force_ov1_blt) {
mdp4_dtv_overlay_blt_stop(mfd);
- pr_info("%s mixer1 stop blt [%d] from %d to %d.\n",
+ pr_debug("%s mixer1 stop blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov1_blt,
@@ -3113,9 +3161,6 @@
__func__);
}
- if (hdmi_prim_display)
- fill_black_screen(FALSE, pipe->pipe_num, pipe->mixer_num);
-
mdp4_overlay_mdp_pipe_req(pipe, mfd);
mutex_unlock(&mfd->dma->ov_mutex);
@@ -3189,9 +3234,13 @@
} else { /* mixer1, DTV, ATV */
if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- if (hdmi_prim_display)
- fill_black_screen(TRUE, pipe->pipe_num,
- pipe->mixer_num);
+ if (hdmi_prim_display) {
+ struct mdp4_overlay_pipe *pp;
+ pp = &ctrl->sf_plist[pipe->mixer_num]
+ [pipe->pipe_ndx - 1];
+ *pp = *pipe; /* clone it */
+ pp->solid_fill = 1;
+ }
mdp4_overlay_dtv_unset(mfd, pipe);
}
}
@@ -3396,7 +3445,8 @@
pipe->srcp0_ystride = pipe->src_width;
if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
(pipe->src_format == MDP_Y_CBCR_H1V1) ||
- (pipe->src_format == MDP_Y_CRCB_H1V2)) {
+ (pipe->src_format == MDP_Y_CRCB_H1V2) ||
+ (pipe->src_format == MDP_Y_CBCR_H1V2)) {
if (pipe->src_width > YUV_444_MAX_WIDTH)
pipe->srcp1_ystride = pipe->src_width << 2;
else
@@ -3496,8 +3546,9 @@
return ret;
}
-int mdp4_overlay_commit(struct fb_info *info, int mixer)
+int mdp4_overlay_commit(struct fb_info *info)
{
+ int ret = 0;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (mfd == NULL)
@@ -3506,34 +3557,37 @@
if (!mfd->panel_power_on) /* suspended */
return -EINVAL;
- if (mixer >= MDP4_MIXER_MAX)
- return -EPERM;
-
mutex_lock(&mfd->dma->ov_mutex);
mdp4_overlay_mdp_perf_upd(mfd, 1);
- if (mixer == MDP4_MIXER0) {
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- /* cndx = 0 */
- mdp4_dsi_cmd_pipe_commit(0, 1);
- } else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- /* cndx = 0 */
- mdp4_dsi_video_pipe_commit(0, 1);
- } else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- /* cndx = 0 */
- mdp4_lcdc_pipe_commit(0, 1);
- }
- } else if (mixer == MDP4_MIXER1) {
- if (ctrl->panel_mode & MDP4_PANEL_DTV)
- mdp4_dtv_pipe_commit(0, 1);
+ switch (mfd->panel.type) {
+ case MIPI_CMD_PANEL:
+ mdp4_dsi_cmd_pipe_commit(0, 1);
+ break;
+ case MIPI_VIDEO_PANEL:
+ mdp4_dsi_video_pipe_commit(0, 1);
+ break;
+ case LCDC_PANEL:
+ mdp4_lcdc_pipe_commit(0, 1);
+ break;
+ case DTV_PANEL:
+ mdp4_dtv_pipe_commit(0, 1);
+ break;
+ case WRITEBACK_PANEL:
+ mdp4_wfd_pipe_commit(mfd, 0, 1);
+ break;
+ default:
+ pr_err("Panel Not Supported for Commit");
+ ret = -EINVAL;
+ break;
}
mdp4_overlay_mdp_perf_upd(mfd, 0);
mutex_unlock(&mfd->dma->ov_mutex);
- return 0;
+ return ret;
}
struct msm_iommu_ctx {
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index f5df938..239d9f5 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -168,6 +168,8 @@
pipe = vctrl->base_pipe;
mixer = pipe->mixer_num;
+ mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
if (vp->update_cnt == 0) {
mutex_unlock(&vctrl->update_lock);
return cnt;
@@ -1110,7 +1112,6 @@
mdp4_dsi_video_pipe_queue(0, pipe);
}
- mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
mdp4_overlay_mdp_perf_upd(mfd, 1);
cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 4db684b..1de5d6e 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -72,6 +72,7 @@
struct completion dmae_comp;
struct completion vsync_comp;
spinlock_t spin_lock;
+ struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *base_pipe;
struct vsync_update vlist[2];
int vsync_irq_enabled;
@@ -180,6 +181,8 @@
mixer = pipe->mixer_num;
mdp4_overlay_iommu_unmap_freelist(mixer);
+ mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
if (vp->update_cnt == 0) {
mutex_unlock(&vctrl->update_lock);
return 0;
@@ -442,6 +445,9 @@
int hsync_end_x;
struct fb_info *fbi;
struct fb_var_screeninfo *var;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[0];
if (!mfd)
return -ENODEV;
@@ -452,6 +458,8 @@
fbi = mfd->fbi;
var = &fbi->var;
+ vctrl->mfd = mfd;
+
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (hdmi_prim_display) {
if (is_mdp4_hw_reset()) {
@@ -983,22 +991,27 @@
spin_unlock(&vctrl->spin_lock);
}
-void mdp4_dtv_set_black_screen(bool commit)
+void mdp4_dtv_set_black_screen(void)
{
char *rgb_base;
/*Black color*/
uint32 color = 0x00000000;
uint32 temp_src_format;
- int cndx = 0;
+ int commit = 1, cndx = 0;
+ int pipe_num = OVERLAY_PIPE_RGB1;
struct vsycn_ctrl *vctrl;
vctrl = &vsync_ctrl_db[cndx];
- if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
- pr_debug("dtv_pipe is not configured yet\n");
+ if (!hdmi_prim_display)
return;
- }
+
+ if (vctrl->base_pipe == NULL)
+ commit = 0;
+ else
+ pipe_num = vctrl->base_pipe->pipe_num;
+
rgb_base = MDP_BASE;
- rgb_base += (MDP4_RGB_OFF * (vctrl->base_pipe->pipe_num + 2));
+ rgb_base += (MDP4_RGB_OFF * (pipe_num + 2));
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -1013,13 +1026,12 @@
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
-
mdp4_mixer_stage_up(vctrl->base_pipe, 0);
mdp4_mixer_stage_commit(vctrl->base_pipe->mixer_num);
} else {
/* MDP_OVERLAY_REG_FLUSH for pipe*/
MDP_OUTP(MDP_BASE + 0x18000,
- BIT(vctrl->base_pipe->pipe_num + 2) | BIT(MDP4_MIXER1));
+ BIT(pipe_num + 2) | BIT(MDP4_MIXER1));
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
}
@@ -1132,7 +1144,6 @@
pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
mdp4_dtv_pipe_queue(0, pipe);
}
- mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
if (hdmi_prim_display)
wait = 1;
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 172687a..a7058ce 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -172,6 +172,8 @@
pipe = vctrl->base_pipe;
mixer = pipe->mixer_num;
+ mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
if (vp->update_cnt == 0) {
mutex_unlock(&vctrl->update_lock);
return 0;
@@ -969,7 +971,6 @@
mdp4_lcdc_pipe_queue(0, pipe);
}
- mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
mdp4_overlay_mdp_perf_upd(mfd, 1);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 6c2b1f6..aa50d94 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -88,6 +88,10 @@
}
static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list *node);
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list **wfdnode);
int mdp4_overlay_writeback_on(struct platform_device *pdev)
{
@@ -317,7 +321,8 @@
static void mdp4_wfd_wait4ov(int cndx);
-int mdp4_wfd_pipe_commit(void)
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+ int cndx, int wait)
{
int i, undx;
int mixer = 0;
@@ -327,8 +332,9 @@
struct mdp4_overlay_pipe *real_pipe;
unsigned long flags;
int cnt = 0;
+ struct msmfb_writeback_data_list *node = NULL;
- vctrl = &vsync_ctrl_db[0];
+ vctrl = &vsync_ctrl_db[cndx];
mutex_lock(&vctrl->update_lock);
undx = vctrl->update_ndx;
@@ -346,6 +352,8 @@
vp->update_cnt = 0; /* reset */
mutex_unlock(&vctrl->update_lock);
+ mdp4_wfd_dequeue_update(mfd, &node);
+
/* free previous committed iommu back to pool */
mdp4_overlay_iommu_unmap_freelist(mixer);
@@ -383,6 +391,11 @@
mdp4_stat.overlay_commit[pipe->mixer_num]++;
+ if (wait)
+ mdp4_wfd_wait4ov(cndx);
+
+ mdp4_wfd_queue_wakeup(mfd, node);
+
return cnt;
}
@@ -444,7 +457,6 @@
void mdp4_writeback_overlay(struct msm_fb_data_type *mfd)
{
- struct msmfb_writeback_data_list *node = NULL;
struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
@@ -456,36 +468,7 @@
vctrl = &vsync_ctrl_db[0];
pipe = vctrl->base_pipe;
- mutex_lock(&mfd->unregister_mutex);
- mutex_lock(&mfd->writeback_mutex);
- if (!list_empty(&mfd->writeback_free_queue)
- && mfd->writeback_state != WB_STOPING
- && mfd->writeback_state != WB_STOP) {
- node = list_first_entry(&mfd->writeback_free_queue,
- struct msmfb_writeback_data_list, active_entry);
- }
- if (node) {
- list_del(&(node->active_entry));
- node->state = IN_BUSY_QUEUE;
- mfd->writeback_active_cnt++;
- }
- mutex_unlock(&mfd->writeback_mutex);
-
- pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
-
- if (!pipe->ov_blt_addr) {
- pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
- (unsigned int)pipe->ov_blt_addr, node);
- mutex_unlock(&mfd->unregister_mutex);
- return;
- }
-
mutex_lock(&mfd->dma->ov_mutex);
- if (pipe && !pipe->ov_blt_addr) {
- pr_err("%s: no writeback buffer 0x%x\n", __func__,
- (unsigned int)pipe->ov_blt_addr);
- goto fail_no_blt_addr;
- }
if (pipe->pipe_type == OVERLAY_TYPE_RGB)
mdp4_wfd_pipe_queue(0, pipe);
@@ -493,26 +476,15 @@
mdp4_overlay_mdp_perf_upd(mfd, 1);
mdp_clk_ctrl(1);
- mdp4_overlay_writeback_update(mfd);
- mdp4_wfd_pipe_commit();
+ mdp4_wfd_pipe_commit(mfd, 0, 1);
mdp4_overlay_mdp_perf_upd(mfd, 0);
- mdp4_wfd_wait4ov(0);
mdp_clk_ctrl(0);
- mutex_lock(&mfd->writeback_mutex);
- list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
- mfd->writeback_active_cnt--;
- mutex_unlock(&mfd->writeback_mutex);
- wake_up(&mfd->wait_q);
-fail_no_blt_addr:
- /*NOTE: This api was removed
- mdp4_overlay_resource_release();*/
mutex_unlock(&mfd->dma->ov_mutex);
- mutex_unlock(&mfd->unregister_mutex);
- pr_debug("%s:-\n", __func__);
+
}
static int mdp4_overlay_writeback_register_buffer(
@@ -763,3 +735,68 @@
mutex_unlock(&mfd->unregister_mutex);
return rc;
}
+
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list **wfdnode)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ struct msmfb_writeback_data_list *node = NULL;
+
+ if (mfd && !mfd->panel_power_on)
+ return;
+
+ pr_debug("%s:+ mfd=%x\n", __func__, (int)mfd);
+
+ vctrl = &vsync_ctrl_db[0];
+ pipe = vctrl->base_pipe;
+
+ mutex_lock(&mfd->unregister_mutex);
+ mutex_lock(&mfd->writeback_mutex);
+ if (!list_empty(&mfd->writeback_free_queue)
+ && mfd->writeback_state != WB_STOPING
+ && mfd->writeback_state != WB_STOP) {
+ node = list_first_entry(&mfd->writeback_free_queue,
+ struct msmfb_writeback_data_list, active_entry);
+ }
+ if (node) {
+ list_del(&(node->active_entry));
+ node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
+ }
+ mutex_unlock(&mfd->writeback_mutex);
+
+ pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+
+ if (!pipe->ov_blt_addr) {
+ pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
+ (unsigned int)pipe->ov_blt_addr, node);
+ mutex_unlock(&mfd->unregister_mutex);
+ return;
+ }
+
+ mdp4_overlay_writeback_update(mfd);
+
+ *wfdnode = node;
+
+ mutex_unlock(&mfd->unregister_mutex);
+}
+
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list *node)
+{
+
+ if (mfd && !mfd->panel_power_on)
+ return;
+
+ if (node == NULL)
+ return;
+
+ pr_debug("%s: mfd=%x node: %p", __func__, (int)mfd, node);
+
+ mutex_lock(&mfd->writeback_mutex);
+ list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
+ mfd->writeback_active_cnt--;
+ mutex_unlock(&mfd->writeback_mutex);
+ wake_up(&mfd->wait_q);
+}
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 54f5ef5..767375d 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -1029,6 +1029,135 @@
.write = dbg_reg_write,
};
+u32 dbg_force_ov0_blt;
+u32 dbg_force_ov1_blt;
+
+static ssize_t dbg_force_ov0_blt_read(
+ struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos) {
+ int len;
+
+ if (*ppos)
+ return 0;
+
+ len = snprintf(debug_buf, sizeof(debug_buf),
+ "%x\n", dbg_force_ov0_blt);
+
+ if (len < 0)
+ return 0;
+
+ if (copy_to_user(buff, debug_buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+
+ return len;
+}
+
+static ssize_t dbg_force_ov0_blt_write(
+ struct file *file,
+ const char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ u32 cnt;
+
+ if (count >= sizeof(debug_buf))
+ return -EFAULT;
+
+ if (copy_from_user(debug_buf, buff, count))
+ return -EFAULT;
+
+ debug_buf[count] = 0; /* end of string */
+
+ cnt = sscanf(debug_buf, "%x", &dbg_force_ov0_blt);
+
+ pr_info("%s: dbg_force_ov0_blt = %x\n",
+ __func__, dbg_force_ov0_blt);
+
+ if ((dbg_force_ov0_blt & 0x0f) > 2)
+ pr_err("%s: invalid dbg_force_ov0_blt = %x\n",
+ __func__, dbg_force_ov0_blt);
+
+ if ((dbg_force_ov0_blt >> 4) > 2)
+ pr_err("%s: invalid dbg_force_ov0_blt = %x\n",
+ __func__, dbg_force_ov0_blt);
+
+ return count;
+}
+
+static const struct file_operations dbg_force_ov0_blt_fops = {
+ .open = dbg_open,
+ .release = dbg_release,
+ .read = dbg_force_ov0_blt_read,
+ .write = dbg_force_ov0_blt_write,
+};
+
+static ssize_t dbg_force_ov1_blt_read(
+ struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos) {
+ int len;
+
+ if (*ppos)
+ return 0;
+
+ len = snprintf(debug_buf, sizeof(debug_buf),
+ "%x\n", dbg_force_ov1_blt);
+
+ if (len < 0)
+ return 0;
+
+ if (copy_to_user(buff, debug_buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+
+ return len;
+}
+
+static ssize_t dbg_force_ov1_blt_write(
+ struct file *file,
+ const char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ u32 cnt;
+
+ if (count >= sizeof(debug_buf))
+ return -EFAULT;
+
+ if (copy_from_user(debug_buf, buff, count))
+ return -EFAULT;
+
+ debug_buf[count] = 0; /* end of string */
+
+ cnt = sscanf(debug_buf, "%x", &dbg_force_ov1_blt);
+
+ pr_info("%s: dbg_force_ov1_blt = %x\n",
+ __func__, dbg_force_ov1_blt);
+
+ if ((dbg_force_ov1_blt & 0x0f) > 2)
+ pr_err("%s: invalid dbg_force_ov1_blt = %x\n",
+ __func__, dbg_force_ov1_blt);
+
+ if ((dbg_force_ov1_blt >> 4) > 2)
+ pr_err("%s: invalid dbg_force_ov1_blt = %x\n",
+ __func__, dbg_force_ov1_blt);
+
+ return count;
+}
+
+static const struct file_operations dbg_force_ov1_blt_fops = {
+ .open = dbg_open,
+ .release = dbg_release,
+ .read = dbg_force_ov1_blt_read,
+ .write = dbg_force_ov1_blt_write,
+};
+
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
static uint32 hdmi_offset;
static uint32 hdmi_count;
@@ -1249,6 +1378,22 @@
}
#endif
+ if (debugfs_create_file("force_ov0_blt", 0644, dent, 0,
+ &dbg_force_ov0_blt_fops)
+ == NULL) {
+ pr_err("%s(%d): debugfs_create_file: debug fail\n",
+ __FILE__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (debugfs_create_file("force_ov1_blt", 0644, dent, 0,
+ &dbg_force_ov1_blt_fops)
+ == NULL) {
+ pr_err("%s(%d): debugfs_create_file: debug fail\n",
+ __FILE__, __LINE__);
+ return -EFAULT;
+ }
+
dent = debugfs_create_dir("mddi", NULL);
if (IS_ERR(dent)) {
diff --git a/drivers/video/msm/mdss/Kconfig b/drivers/video/msm/mdss/Kconfig
index 424455f..56eb90c 100644
--- a/drivers/video/msm/mdss/Kconfig
+++ b/drivers/video/msm/mdss/Kconfig
@@ -11,3 +11,12 @@
---help---
The MDSS HDMI Panel provides support for transmitting TMDS signals of
MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
+
+config FB_MSM_MDSS_HDMI_MHL_8334
+ depends on FB_MSM_MDSS_HDMI_PANEL
+ bool 'MHL SII8334 support'
+ default n
+ ---help---
+ Support the HDMI to MHL conversion.
+ MHL (Mobile High-Definition Link) technology
+ uses USB connector to output HDMI content
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index b4bd31e..88a7c45 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -18,5 +18,6 @@
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_8334) += mhl_sii8334.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 3c60c2b..d041125 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -21,6 +21,8 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <mach/iommu_domains.h>
+
#define MDSS_REG_WRITE(addr, val) writel_relaxed(val, mdss_res->mdp_base + addr)
#define MDSS_REG_READ(addr) readl_relaxed(mdss_res->mdp_base + addr)
@@ -34,6 +36,21 @@
MDSS_MAX_CLK
};
+enum mdss_iommu_domain_type {
+ MDSS_IOMMU_DOMAIN_SECURE,
+ MDSS_IOMMU_DOMAIN_UNSECURE,
+ MDSS_IOMMU_MAX_DOMAIN
+};
+
+struct mdss_iommu_map_type {
+ char *client_name;
+ char *ctx_name;
+ struct device *ctx;
+ struct msm_iova_partition partitions[1];
+ int npartitions;
+ int domain_idx;
+};
+
struct mdss_data_type {
u32 rev;
u32 mdp_rev;
@@ -72,8 +89,8 @@
u32 *mixer_type_map;
struct ion_client *iclient;
- int iommu_domain;
int iommu_attached;
+ struct mdss_iommu_map_type *iommu_map;
struct early_suspend early_suspend;
};
@@ -112,14 +129,14 @@
return mdss_res->iommu_attached;
}
-static inline int mdss_get_iommu_domain(void)
+static inline int mdss_get_iommu_domain(u32 type)
{
+ if (type >= MDSS_IOMMU_MAX_DOMAIN)
+ return -EINVAL;
+
if (!mdss_res)
return -ENODEV;
- return mdss_res->iommu_domain;
+ return mdss_res->iommu_map[type].domain_idx;
}
-
-int mdss_iommu_attach(void);
-int mdss_iommu_dettach(void);
#endif /* MDSS_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8f4f4d5..980ed46 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -327,6 +327,26 @@
return ret;
}
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_dsi_on(pdata);
+ break;
+ case MDSS_EVENT_BLANK:
+ rc = mdss_dsi_ctrl_unprepare(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_dsi_off(pdata);
+ break;
+ }
+ return rc;
+}
+
static int mdss_dsi_resource_initialized;
static int __devinit mdss_dsi_probe(struct platform_device *pdev)
@@ -476,9 +496,7 @@
if (!ctrl_pdata)
return -ENOMEM;
- (ctrl_pdata->panel_data).on = mdss_dsi_on;
- (ctrl_pdata->panel_data).off = mdss_dsi_off;
- (ctrl_pdata->panel_data).intf_unprepare = mdss_dsi_ctrl_unprepare;
+ ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
memcpy(&((ctrl_pdata->panel_data).panel_info),
&(panel_data->panel_info),
sizeof(struct mdss_panel_info));
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 125644e..8c3b1a8 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -1199,6 +1199,7 @@
{
int len;
int i;
+ int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
char *bp;
unsigned long size, addr;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -1229,7 +1230,7 @@
if (is_mdss_iommu_attached()) {
int ret = msm_iommu_map_contig_buffer(tp->dmap,
- mdss_get_iommu_domain(), 0,
+ mdss_get_iommu_domain(domain), 0,
size, SZ_4K, 0, &(addr));
if (IS_ERR_VALUE(ret)) {
pr_err("unable to map dma memory to iommu(%d)\n", ret);
@@ -1251,8 +1252,8 @@
wait_for_completion(&dsi_dma_comp);
if (is_mdss_iommu_attached())
- msm_iommu_unmap_contig_buffer(addr, mdss_get_iommu_domain(),
- 0, size);
+ msm_iommu_unmap_contig_buffer(addr,
+ mdss_get_iommu_domain(domain), 0, size);
dma_unmap_single(&dsi_dev, tp->dmap, size, DMA_TO_DEVICE);
tp->dmap = 0;
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index 1d7a6fe..1cf3101 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -353,6 +353,23 @@
return ret;
}
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_edp_on(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_edp_off(pdata);
+ break;
+ }
+ return rc;
+}
+
/*
* Converts from EDID struct to mdss_panel_info
*/
@@ -413,8 +430,7 @@
edp_drv->panel_data.panel_info.bl_min = 1;
edp_drv->panel_data.panel_info.bl_max = 255;
- edp_drv->panel_data.on = mdss_edp_on;
- edp_drv->panel_data.off = mdss_edp_off;
+ edp_drv->panel_data.event_handler = mdss_edp_event_handler;
edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
ret = mdss_register_panel(&edp_drv->panel_data);
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index b711fd9..4ec4046 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -325,6 +325,24 @@
return 0;
}
+static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+{
+ struct mdss_panel_data *pdata;
+
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending event=%d for fb%d\n", e, mfd->index);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, e, NULL);
+
+ return 0;
+}
+
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
@@ -334,6 +352,12 @@
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+ if (ret) {
+ pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_on = mfd->panel_power_on;
@@ -359,6 +383,12 @@
pr_debug("mdss_fb resume index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+ if (ret) {
+ pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
/* resume state var recover */
mfd->op_enable = mfd->suspend.op_enable;
@@ -691,6 +721,7 @@
size *= mfd->fb_page;
if (mfd->index == 0) {
+ int dom;
virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
if (!virt) {
pr_err("unable to alloc fbmem size=%u\n", size);
@@ -698,9 +729,9 @@
}
phys = memory_pool_node_paddr(virt);
if (is_mdss_iommu_attached()) {
- msm_iommu_map_contig_buffer(phys,
- mdss_get_iommu_domain(), 0, size, SZ_4K, 0,
- &(mfd->iova));
+ dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+ msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
+ 0, &(mfd->iova));
}
pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
size, virt, phys, mfd->index);
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 78f2b9a..b760388 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -93,6 +93,7 @@
u32 bl_scale;
u32 bl_min_lvl;
struct mutex lock;
+ struct mutex ov_lock;
struct platform_device *pdev;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index d932bc9..539cd49 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -66,6 +66,21 @@
.irq_handler = hdmi_tx_isr,
};
+struct dss_gpio hpd_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-hpd"},
+ {0, 1, COMPATIBLE_NAME "-ddc-clk"},
+ {0, 1, COMPATIBLE_NAME "-ddc-data"},
+ {0, 1, COMPATIBLE_NAME "-mux-en"},
+ {0, 0, COMPATIBLE_NAME "-mux-sel"}
+};
+
+struct dss_gpio core_gpio_config[] = {
+};
+
+struct dss_gpio cec_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-cec"}
+};
+
const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
{
switch (module) {
@@ -497,6 +512,9 @@
DEV_INFO("%s: Hdmi state switch to %d\n", __func__,
hdmi_ctrl->sdev.state);
}
+
+ if (!completion_done(&hdmi_ctrl->hpd_done))
+ complete_all(&hdmi_ctrl->hpd_done);
} /* hdmi_tx_hpd_int_work */
static int hdmi_tx_check_capability(struct dss_io_data *io)
@@ -1764,7 +1782,6 @@
mutex_lock(&hdmi_ctrl->mutex);
hdmi_ctrl->panel_power_on = true;
- /* todo: check hdmi_tx_is_controller_on when hpd is on */
if (hdmi_ctrl->hpd_state) {
DEV_DBG("%s: Turning HDMI on\n", __func__);
mutex_unlock(&hdmi_ctrl->mutex);
@@ -1984,9 +2001,13 @@
hdmi_ctrl->ddc_ctrl.io = &pdata->io[HDMI_TX_CORE_IO];
init_completion(&hdmi_ctrl->ddc_ctrl.ddc_sw_done);
+ hdmi_ctrl->panel_power_on = false;
+ hdmi_ctrl->panel_suspend = false;
+
hdmi_ctrl->hpd_state = false;
hdmi_ctrl->hpd_initialized = false;
hdmi_ctrl->hpd_off_pending = false;
+ init_completion(&hdmi_ctrl->hpd_done);
INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
INIT_WORK(&hdmi_ctrl->power_off_work, hdmi_tx_power_off_work);
@@ -2020,6 +2041,101 @@
return rc;
} /* hdmi_tx_dev_init */
+static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
+ int event, void *arg)
+{
+ int rc = 0;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: event = %d suspend=%d, hpd_feature=%d\n", __func__,
+ event, hdmi_ctrl->panel_suspend, hdmi_ctrl->hpd_feature_on);
+
+ switch (event) {
+ case MDSS_EVENT_RESUME:
+ if (hdmi_ctrl->hpd_feature_on) {
+ INIT_COMPLETION(hdmi_ctrl->hpd_done);
+
+ rc = hdmi_tx_hpd_on(hdmi_ctrl);
+ if (rc)
+ DEV_ERR("%s: hdmi_tx_hpd_on failed. rc=%d\n",
+ __func__, rc);
+ }
+ break;
+
+ case MDSS_EVENT_RESET:
+ if (hdmi_ctrl->panel_suspend) {
+ u32 timeout;
+ hdmi_ctrl->panel_suspend = false;
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &hdmi_ctrl->hpd_done, HZ/10);
+ if (!timeout && !hdmi_ctrl->hpd_state) {
+ DEV_INFO("%s: cable removed during suspend\n",
+ __func__);
+
+ kobject_uevent(hdmi_ctrl->kobj, KOBJ_OFFLINE);
+ switch_set_state(&hdmi_ctrl->sdev, 0);
+
+ rc = -EPERM;
+ } else {
+ DEV_DBG("%s: cable present after resume\n",
+ __func__);
+ }
+ }
+ break;
+
+ case MDSS_EVENT_UNBLANK:
+ rc = hdmi_tx_power_on(panel_data);
+ if (rc)
+ DEV_ERR("%s: hdmi_tx_power_on failed. rc=%d\n",
+ __func__, rc);
+ break;
+
+ case MDSS_EVENT_TIMEGEN_ON:
+ break;
+
+ case MDSS_EVENT_SUSPEND:
+ if (!hdmi_ctrl->panel_power_on) {
+ if (hdmi_ctrl->hpd_feature_on)
+ hdmi_tx_hpd_off(hdmi_ctrl);
+ else
+ DEV_ERR("%s: invalid state\n", __func__);
+
+ hdmi_ctrl->panel_suspend = false;
+ } else {
+ hdmi_ctrl->hpd_off_pending = true;
+ hdmi_ctrl->panel_suspend = true;
+ }
+ break;
+
+ case MDSS_EVENT_BLANK:
+ if (hdmi_ctrl->panel_power_on) {
+ rc = hdmi_tx_power_off(panel_data);
+ if (rc)
+ DEV_ERR("%s: hdmi_tx_power_off failed.rc=%d\n",
+ __func__, rc);
+
+ } else {
+ DEV_DBG("%s: hdmi is already powered off\n", __func__);
+ }
+ break;
+
+ case MDSS_EVENT_TIMEGEN_OFF:
+ /* If a power off is already underway, wait for it to finish */
+ if (hdmi_ctrl->panel_suspend)
+ flush_work_sync(&hdmi_ctrl->power_off_work);
+ break;
+ }
+
+ return rc;
+} /* hdmi_tx_panel_event_handler */
+
static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc = 0;
@@ -2029,8 +2145,7 @@
return -EINVAL;
}
- hdmi_ctrl->panel_data.on = hdmi_tx_power_on;
- hdmi_ctrl->panel_data.off = hdmi_tx_power_off;
+ hdmi_ctrl->panel_data.event_handler = hdmi_tx_panel_event_handler;
hdmi_ctrl->video_resolution = DEFAULT_VIDEO_RESOLUTION;
rc = hdmi_tx_init_panel_info(hdmi_ctrl->video_resolution,
@@ -2433,29 +2548,32 @@
static int hdmi_tx_get_dt_gpio_data(struct device *dev,
struct dss_module_power *mp, u32 module_type)
{
- int i, j, rc = 0;
- int dt_gpio_total = 0, mod_gpio_total = 0;
- u32 ndx_mask = 0;
- const char *mod_name = NULL;
+ int i, j;
+ int mp_gpio_cnt = 0, gpio_list_size = 0;
+ struct dss_gpio *gpio_list = NULL;
struct device_node *of_node = NULL;
- char prop_name[32];
- snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME, "gpio-names");
+
+ DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
if (!dev || !mp) {
DEV_ERR("%s: invalid input\n", __func__);
- rc = -EINVAL;
- goto error;
+ return -EINVAL;
}
+ of_node = dev->of_node;
+
switch (module_type) {
case HDMI_TX_HPD_PM:
- mod_name = "hpd";
+ gpio_list_size = ARRAY_SIZE(hpd_gpio_config);
+ gpio_list = hpd_gpio_config;
break;
case HDMI_TX_CORE_PM:
- mod_name = "core";
+ gpio_list_size = ARRAY_SIZE(core_gpio_config);
+ gpio_list = core_gpio_config;
break;
case HDMI_TX_CEC_PM:
- mod_name = "cec";
+ gpio_list_size = ARRAY_SIZE(cec_gpio_config);
+ gpio_list = cec_gpio_config;
break;
default:
DEV_ERR("%s: invalid module type=%d\n", __func__,
@@ -2463,90 +2581,49 @@
return -EINVAL;
}
- DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+ for (i = 0; i < gpio_list_size; i++)
+ if (of_find_property(of_node, gpio_list[i].gpio_name, NULL))
+ mp_gpio_cnt++;
- of_node = dev->of_node;
-
- dt_gpio_total = of_gpio_count(of_node);
- if (dt_gpio_total < 0) {
- DEV_ERR("%s: gpio not found. rc=%d\n", __func__,
- dt_gpio_total);
- rc = dt_gpio_total;
- goto error;
- }
-
- /* count how many gpio for particular hdmi module */
- for (i = 0; i < dt_gpio_total; i++) {
- const char *st = NULL;
-
- rc = of_property_read_string_index(of_node,
- prop_name, i, &st);
- if (rc) {
- DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
- __func__, i, rc);
- goto error;
- }
-
- if (strnstr(st, mod_name, strlen(st))) {
- ndx_mask |= BIT(i);
- mod_gpio_total++;
- }
- }
-
- if (mod_gpio_total > 0) {
- mp->num_gpio = mod_gpio_total;
- mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
- mod_gpio_total, GFP_KERNEL);
- if (!mp->gpio_config) {
- DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
- hdmi_tx_pm_name(module_type));
- goto error;
- }
- } else {
+ if (!mp_gpio_cnt) {
DEV_DBG("%s: no gpio\n", __func__);
return 0;
}
+ DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt);
+ mp->num_gpio = mp_gpio_cnt;
- for (i = 0, j = 0; (i < dt_gpio_total) && (j < mod_gpio_total); i++) {
- const char *st = NULL;
+ mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
+ mp_gpio_cnt, GFP_KERNEL);
+ if (!mp->gpio_config) {
+ DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
+ hdmi_tx_pm_name(module_type));
- if (!(ndx_mask & BIT(0))) {
- ndx_mask >>= 1;
+ mp->num_gpio = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < gpio_list_size; i++) {
+ int gpio = of_get_named_gpio(of_node,
+ gpio_list[i].gpio_name, 0);
+ if (gpio < 0) {
+ DEV_DBG("%s: no gpio named %s\n", __func__,
+ gpio_list[i].gpio_name);
continue;
}
+ memcpy(&mp->gpio_config[j], &gpio_list[i],
+ sizeof(struct dss_gpio));
- /* gpio-name */
- rc = of_property_read_string_index(of_node,
- prop_name, i, &st);
- if (rc) {
- DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
- __func__, i, rc);
- goto error;
- }
- snprintf(mp->gpio_config[j].gpio_name, 32, "%s", st);
+ mp->gpio_config[j].gpio = (unsigned)gpio;
- /* gpio-number */
- mp->gpio_config[j].gpio = of_get_gpio(of_node, i);
-
- DEV_DBG("%s: gpio num=%d, name=%s\n", __func__,
- mp->gpio_config[j].gpio,
- mp->gpio_config[j].gpio_name);
-
- ndx_mask >>= 1;
+ DEV_DBG("%s: gpio num=%d, name=%s, value=%d\n",
+ __func__, mp->gpio_config[j].gpio,
+ mp->gpio_config[j].gpio_name,
+ mp->gpio_config[j].value);
j++;
}
- return rc;
-
-error:
- if (mp->gpio_config) {
- devm_kfree(dev, mp->gpio_config);
- mp->gpio_config = NULL;
- }
- mp->num_gpio = 0;
-
- return rc;
+ return 0;
} /* hdmi_tx_get_dt_gpio_data */
static void hdmi_tx_put_dt_data(struct device *dev,
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index ce19355..2d431b7 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -51,11 +51,13 @@
uint32_t video_resolution;
u32 panel_power_on;
+ u32 panel_suspend;
- u32 hpd_initialized;
u32 hpd_state;
u32 hpd_off_pending;
u32 hpd_feature_on;
+ u32 hpd_initialized;
+ struct completion hpd_done;
struct work_struct hpd_int_work;
struct work_struct power_off_work;
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index 0a14056..2bf2d74 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include "mdss_io_util.h"
+#define MAX_I2C_CMDS 16
void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
{
u32 in_val;
@@ -248,6 +249,10 @@
int i = 0, rc = 0;
if (enable) {
for (i = 0; i < num_gpio; i++) {
+ DEV_DBG("%pS->%s: %s enable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+
rc = gpio_request(in_gpio[i].gpio,
in_gpio[i].gpio_name);
if (rc < 0) {
@@ -256,10 +261,16 @@
in_gpio[i].gpio_name);
goto disable_gpio;
}
+ gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
}
} else {
- for (i = num_gpio-1; i >= 0; i--)
+ for (i = num_gpio-1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: %s disable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+
gpio_free(in_gpio[i].gpio);
+ }
}
return rc;
@@ -382,3 +393,59 @@
return rc;
} /* msm_dss_enable_clk */
+
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value)
+{
+ struct i2c_msg msgs[1];
+ uint8_t data[2];
+ int status = -EACCES;
+
+ pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ data[0] = reg_offset;
+ data[1] = *value;
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = data;
+
+ status = i2c_transfer(client->adapter, msgs, 1);
+ if (status < 1) {
+ pr_err("I2C WRITE FAILED=[%d]\n", status);
+ return -EACCES;
+ }
+ pr_debug("%s: I2C write status=%x\n", __func__, status);
+ return status;
+}
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 51e9e54..85826f7 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -16,6 +16,8 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
#ifdef DEBUG
#define DEV_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -56,6 +58,7 @@
struct dss_gpio {
unsigned gpio;
+ unsigned value;
char gpio_name[32];
};
@@ -97,4 +100,9 @@
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value);
+
#endif /* __MDSS_IO_UTIL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 11b0831..bcb3aee 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -95,28 +95,29 @@
.name = "mdss_mdp",
};
-struct msm_iova_partition mdp_iommu_partitions[] = {
- {
- .start = SZ_128K,
- .size = SZ_2G - SZ_128K,
+struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
+ [MDSS_IOMMU_DOMAIN_UNSECURE] = {
+ .client_name = "mdp_ns",
+ .ctx_name = "mdp_0",
+ .partitions = {
+ {
+ .start = SZ_128K,
+ .size = SZ_1G - SZ_128K,
+ },
+ },
+ .npartitions = 1,
},
-};
-struct msm_iova_layout mdp_iommu_layout = {
- .client_name = "mdss_mdp",
- .partitions = mdp_iommu_partitions,
- .npartitions = ARRAY_SIZE(mdp_iommu_partitions),
-};
-
-struct {
- char *name;
- struct device *ctx;
-} mdp_iommu_ctx[] = {
- {
- .name = "mdp_0",
+ [MDSS_IOMMU_DOMAIN_SECURE] = {
+ .client_name = "mdp_secure",
+ .ctx_name = "mdp_1",
+ .partitions = {
+ {
+ .start = SZ_1G,
+ .size = SZ_1G,
+ },
+ },
+ .npartitions = 1,
},
- {
- .name = "mdp_1",
- }
};
struct mdss_hw mdss_mdp_hw = {
@@ -670,85 +671,103 @@
return 0;
}
-int mdss_iommu_attach(void)
+int mdss_iommu_attach(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
- int i, domain_idx;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- if (mdss_res->iommu_attached) {
+ if (mdata->iommu_attached) {
pr_warn("mdp iommu already attached\n");
return 0;
}
- domain_idx = mdss_get_iommu_domain();
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ WARN(1, "could not attach iommu client %s to ctx %s\n",
+ iomap->client_name, iomap->ctx_name);
+ continue;
+ }
+ iommu_attach_device(domain, iomap->ctx);
}
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
- if (iommu_attach_device(domain, mdp_iommu_ctx[i].ctx)) {
- WARN(1, "could not attach iommu domain %d to ctx %s\n",
- domain_idx, mdp_iommu_ctx[i].name);
- return -EINVAL;
- }
- }
- mdss_res->iommu_attached = true;
+ mdata->iommu_attached = true;
return 0;
}
-int mdss_iommu_dettach(void)
+int mdss_iommu_dettach(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
- int i, domain_idx;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- if (!mdss_res->iommu_attached) {
+ if (!mdata->iommu_attached) {
pr_warn("mdp iommu already dettached\n");
return 0;
}
- domain_idx = mdss_get_iommu_domain();
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ continue;
+ }
+ iommu_detach_device(domain, iomap->ctx);
}
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++)
- iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);
- mdss_res->iommu_attached = false;
+ mdata->iommu_attached = false;
return 0;
}
-int mdss_iommu_init(void)
+int mdss_iommu_init(struct mdss_data_type *mdata)
{
+ struct msm_iova_layout layout;
struct iommu_domain *domain;
- int domain_idx, i;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- domain_idx = msm_register_domain(&mdp_iommu_layout);
- if (IS_ERR_VALUE(domain_idx))
- return -EINVAL;
-
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ if (mdata->iommu_map) {
+ pr_warn("iommu already initialized\n");
+ return 0;
}
- iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = &mdss_iommu_map[i];
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
- mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
- if (!mdp_iommu_ctx[i].ctx) {
+ layout.client_name = iomap->client_name;
+ layout.partitions = iomap->partitions;
+ layout.npartitions = iomap->npartitions;
+ layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
+
+ iomap->domain_idx = msm_register_domain(&layout);
+ if (IS_ERR_VALUE(iomap->domain_idx))
+ return -EINVAL;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ return -EINVAL;
+ }
+ iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+
+ iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
+ if (!iomap->ctx) {
pr_warn("unable to get iommu ctx(%s)\n",
- mdp_iommu_ctx[i].name);
+ iomap->ctx_name);
return -EINVAL;
}
}
- mdss_res->iommu_domain = domain_idx;
+
+ mdata->iommu_map = mdss_iommu_map;
return 0;
}
@@ -815,9 +834,9 @@
mdata->iclient = NULL;
}
- rc = mdss_iommu_init();
+ rc = mdss_iommu_init(mdata);
if (!IS_ERR_VALUE(rc))
- mdss_iommu_attach();
+ mdss_iommu_attach(mdata);
rc = mdss_hw_init(mdata);
@@ -934,11 +953,11 @@
if (on && !mdata->fs_ena) {
pr_debug("Enable MDP FS\n");
regulator_enable(mdata->fs);
- mdss_iommu_attach();
+ mdss_iommu_attach(mdata);
mdata->fs_ena = true;
} else if (!on && mdata->fs_ena) {
pr_debug("Disable MDP FS\n");
- mdss_iommu_dettach();
+ mdss_iommu_dettach(mdata);
regulator_disable(mdata->fs);
mdata->fs_ena = false;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 72871aa..2e92591 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -284,13 +284,13 @@
void mdss_mdp_clk_ctrl(int enable, int isr);
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd);
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg);
struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index f660375..00f5874 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,11 @@
bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
+
+ if ((bus_ib_quota == 0) && (clk_rate > 0)) {
+ /* allocate min bw for panel cmds if mdp is active */
+ bus_ib_quota = SZ_16M;
+ }
mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
}
if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
@@ -531,9 +536,28 @@
return 0;
}
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
struct mdss_panel_data *pdata;
+ if (!ctl || !ctl->mfd)
+ return -ENODEV;
+
+ pdata = dev_get_platdata(&ctl->mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, event, arg);
+
+ return 0;
+}
+
+int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
u32 outsize, temp, off;
@@ -545,12 +569,6 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
if (mdss_mdp_ctl_init(mfd)) {
pr_err("unable to initialize ctl\n");
return -ENODEV;
@@ -568,6 +586,12 @@
ctl->power_on = true;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
+ if (ret) {
+ pr_err("panel power on failed ctl=%d\n", ctl->num);
+ goto start_fail;
+ }
+
if (ctl->start_fnc)
ret = ctl->start_fnc(ctl);
else
@@ -579,17 +603,6 @@
goto start_fail;
}
- /* request bus bandwidth for panel commands */
- ctl->clk_rate = MDP_CLK_DEFAULT_RATE;
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
- ret = pdata->on(pdata);
- if (ret) {
- pr_err("panel power on failed ctl=%d\n", ctl->num);
- goto panel_fail;
- }
-
pr_debug("ctl_num=%d\n", ctl->num);
mixer = ctl->mixer_left;
@@ -617,23 +630,18 @@
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
}
-panel_fail:
- if (ret && ctl->stop_fnc)
- ctl->stop_fnc(ctl);
+
start_fail:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&ctl->lock);
- if (ret) {
+ if (ret)
mdss_mdp_ctl_destroy(mfd);
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
- }
return ret;
}
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd)
{
- struct mdss_panel_data *pdata;
struct mdss_mdp_ctl *ctl;
int ret = 0;
@@ -648,12 +656,6 @@
return -ENODEV;
}
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
ctl = mfd->ctl;
if (!ctl->power_on) {
@@ -663,43 +665,33 @@
pr_debug("ctl_num=%d\n", mfd->ctl->num);
- mdss_mdp_overlay_release_all(mfd);
-
- /* request bus bandwidth for panel commands */
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_lock(&ctl->lock);
- ctl->power_on = false;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (pdata->intf_unprepare)
- ret = pdata->intf_unprepare(pdata);
-
- if (ret)
- pr_err("%s: intf_unprepare failed\n", __func__);
-
if (ctl->stop_fnc)
ret = ctl->stop_fnc(ctl);
else
pr_warn("no stop func for ctl=%d\n", ctl->num);
- if (ret)
+ if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
-
- ret = pdata->off(pdata);
+ } else {
+ ctl->power_on = false;
+ ctl->play_cnt = 0;
+ ctl->clk_rate = 0;
+ mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
+ }
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- ctl->play_cnt = 0;
-
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_unlock(&ctl->lock);
- if (!mfd->ref_cnt)
+ if (!ret && !mfd->ref_cnt) {
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
+ WARN(ret, "unable to close intf %d\n", ctl->intf_num);
mdss_mdp_ctl_destroy(mfd);
+ }
return ret;
}
@@ -926,13 +918,16 @@
return -ENODEV;
}
- if (!ctl->power_on)
- return 0;
-
pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
- if (mutex_lock_interruptible(&ctl->lock))
- return -EINTR;
+ ret = mutex_lock_interruptible(&ctl->lock);
+ if (ret)
+ return ret;
+
+ if (!ctl->power_on) {
+ mutex_unlock(&ctl->lock);
+ return 0;
+ }
mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
@@ -992,7 +987,7 @@
mutex_lock(&mdss_mdp_ctl_lock);
for (i = 0; i < MDSS_MDP_MAX_CTL; i++) {
ctl = &mdss_mdp_ctl_list[i];
- if ((ctl->power_on) &&
+ if ((ctl->power_on) && (ctl->mfd) &&
(ctl->mfd->index == fb_num)) {
if (ctl->mixer_left) {
mixer_id[mixer_cnt] = ctl->mixer_left->num;
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 1da30b8..b6ac126 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -166,6 +166,7 @@
#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
#define MDSS_MDP_REG_SSPP_FETCH_CONFIG 0x048
#define MDSS_MDP_REG_SSPP_VC1_RANGE 0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS 0x070
#define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR 0x0A4
#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 4d3fbf0..9508846 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -201,7 +201,7 @@
static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_video_ctx *ctx;
- int off;
+ int rc, off;
pr_debug("stop ctl=%d\n", ctl->num);
@@ -211,16 +211,27 @@
return -ENODEV;
}
- if (ctx->vsync_handler)
- mdss_mdp_video_set_vsync_handler(ctl, NULL);
-
if (ctx->timegen_en) {
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+ if (rc == -EBUSY) {
+ pr_debug("intf #%d busy don't turn off\n",
+ ctl->intf_num);
+ return rc;
+ }
+ WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ctx->timegen_en = false;
+
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_OFF, NULL);
+ WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
}
+ if (ctx->vsync_handler)
+ mdss_mdp_video_set_vsync_handler(ctl, NULL);
+
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
NULL, NULL);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
@@ -288,6 +299,7 @@
static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_video_ctx *ctx;
+ int rc;
pr_debug("kickoff ctl=%d\n", ctl->num);
@@ -306,15 +318,23 @@
if (!ctx->timegen_en) {
int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+ WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
- ctx->timegen_en = true;
wmb();
}
wait_for_completion(&ctx->vsync_comp);
+
+ if (!ctx->timegen_en) {
+ ctx->timegen_en = true;
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_ON, NULL);
+ WARN(rc, "intf %d timegen on error (%d)\n", ctl->intf_num, rc);
+ }
if (!ctx->vsync_handler)
mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
mutex_unlock(&ctx->vsync_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index c9acc65..a1f1bcc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -245,7 +245,7 @@
ctx->format = rot->format;
- ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+ ctx->rot90 = !!(rot->flags & MDP_ROT_90);
if (ctx->rot90) {
ctx->opmode |= BIT(5); /* ROT 90 */
swap(ctx->width, ctx->height);
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 569e381..f537c39 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,8 @@
#define CHECK_BOUNDS(offset, size, max_size) \
(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+static atomic_t ov_active_panels = ATOMIC_INIT(0);
+
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
@@ -196,7 +198,9 @@
return -EINVAL;
}
- rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+ /* keep only flags of interest to rotator */
+ rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
+ MDP_SECURE_OVERLAY_SESSION);
rot->format = fmt->format;
rot->img_width = req->src.width;
@@ -207,7 +211,7 @@
rot->src_rect.h = req->src_rect.h;
if (req->flags & MDP_DEINTERLACE) {
- rot->rotations |= MDP_DEINTERLACE;
+ rot->flags |= MDP_DEINTERLACE;
rot->src_rect.h /= 2;
}
@@ -356,8 +360,14 @@
{
int ret;
- if (!mfd->panel_power_on)
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
return -EPERM;
+ }
if (req->flags & MDSS_MDP_ROT_ONLY) {
ret = mdss_mdp_overlay_rotator_setup(mfd, req);
@@ -372,18 +382,22 @@
req->z_order -= MDSS_MDP_STAGE_0;
}
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
static inline int mdss_mdp_overlay_get_buf(struct msm_fb_data_type *mfd,
struct mdss_mdp_data *data,
struct msmfb_data *planes,
- int num_planes)
+ int num_planes,
+ u32 flags)
{
int i;
memset(data, 0, sizeof(*data));
for (i = 0; i < num_planes; i++) {
+ data->p[i].flags = flags;
mdss_mdp_get_img(&planes[i], &data->p[i]);
if (data->p[0].len == 0)
break;
@@ -411,35 +425,18 @@
return 0;
}
-static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe, *tmp;
- struct msm_fb_data_type *mfd = ctl->mfd;
- int i, ret;
+ LIST_HEAD(destroy_pipes);
+ int i;
- if (mfd->kickoff_fnc)
- ret = mfd->kickoff_fnc(ctl);
- else
- ret = mdss_mdp_display_commit(ctl, NULL);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- complete(&mfd->update.comp);
- mutex_lock(&mfd->no_update.lock);
- if (mfd->no_update.timer.function)
- del_timer(&(mfd->no_update.timer));
-
- mfd->no_update.timer.expires = jiffies + (2 * HZ);
- add_timer(&mfd->no_update.timer);
- mutex_unlock(&mfd->no_update.lock);
-
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
- list_del(&pipe->cleanup_list);
+ list_move(&pipe->cleanup_list, &destroy_pipes);
for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
-
- mdss_mdp_pipe_destroy(pipe);
}
if (!list_empty(&mfd->pipes_used)) {
@@ -458,36 +455,44 @@
}
}
mutex_unlock(&mfd->lock);
+ list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+ mdss_mdp_pipe_destroy(pipe);
+ mutex_unlock(&mfd->ov_lock);
+
+ return 0;
+}
+
+static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+{
+ struct msm_fb_data_type *mfd = ctl->mfd;
+ int ret;
+
+ if (mfd->kickoff_fnc)
+ ret = mfd->kickoff_fnc(ctl);
+ else
+ ret = mdss_mdp_display_commit(ctl, NULL);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ complete(&mfd->update.comp);
+ mutex_lock(&mfd->no_update.lock);
+ if (mfd->no_update.timer.function)
+ del_timer(&(mfd->no_update.timer));
+
+ mfd->no_update.timer.expires = jiffies + (2 * HZ);
+ add_timer(&mfd->no_update.timer);
+ mutex_unlock(&mfd->no_update.lock);
+
+ ret = mdss_mdp_overlay_cleanup(mfd);
return ret;
}
-static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
{
struct mdss_mdp_pipe *pipe;
- int i, ret = 0;
u32 pipe_ndx, unset_ndx = 0;
-
- if (!mfd || !mfd->ctl)
- return -ENODEV;
-
- pr_debug("unset ndx=%x\n", ndx);
-
- if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(ndx);
- if (rot) {
- mdss_mdp_rotator_finish(rot);
- } else {
- pr_warn("unknown session id=%x\n", ndx);
- ret = -ENODEV;
- }
-
- return ret;
- }
-
- if (!mfd->ctl->power_on)
- return 0;
+ int i;
for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
pipe_ndx = BIT(i);
@@ -505,37 +510,59 @@
mdss_mdp_mixer_pipe_unstage(pipe);
}
}
+ return 0;
+}
+
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+ int ret = 0;
+
+ if (!mfd || !mfd->ctl)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ pr_debug("unset ndx=%x\n", ndx);
+
+ if (ndx & MDSS_MDP_ROT_SESSION_MASK)
+ ret = mdss_mdp_rotator_release(ndx);
+ else
+ ret = mdss_mdp_overlay_release(mfd, ndx);
+
+ mutex_unlock(&mfd->ov_lock);
return ret;
}
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+static int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
u32 unset_ndx = 0;
int cnt = 0;
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
- if (!list_empty(&mfd->pipes_used)) {
- list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- if (pipe->ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(pipe->ndx);
- if (rot)
- mdss_mdp_rotator_finish(rot);
- } else {
- unset_ndx |= pipe->ndx;
- cnt++;
- }
- }
+ list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+ unset_ndx |= pipe->ndx;
+ cnt++;
}
mutex_unlock(&mfd->lock);
if (unset_ndx) {
pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
- mdss_mdp_overlay_unset(mfd, unset_ndx);
- mdss_mdp_overlay_kickoff(mfd->ctl);
+ mdss_mdp_overlay_release(mfd, unset_ndx);
}
+ mutex_unlock(&mfd->ov_lock);
+
+ if (cnt)
+ mdss_mdp_overlay_kickoff(mfd->ctl);
return 0;
}
@@ -561,26 +588,28 @@
struct mdss_mdp_rotator_session *rot;
struct mdss_mdp_data src_data, dst_data;
int ret;
+ u32 flgs;
- ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1);
+ rot = mdss_mdp_rotator_session_get(req->id);
+ if (!rot) {
+ pr_err("invalid session id=%x\n", req->id);
+ return -ENOENT;
+ }
+
+ flgs = rot->flags & MDP_SECURE_OVERLAY_SESSION;
+
+ ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1, flgs);
if (ret) {
pr_err("src_data pmem error\n");
goto rotate_done;
}
- ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1);
+ ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1, flgs);
if (ret) {
pr_err("dst_data pmem error\n");
goto rotate_done;
}
- rot = mdss_mdp_rotator_session_get(req->id);
- if (!rot) {
- pr_err("invalid session id=%x\n", req->id);
- ret = -ENODEV;
- goto rotate_done;
- }
-
ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
if (ret) {
pr_err("rotator queue error session id=%x\n", req->id);
@@ -601,6 +630,7 @@
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_data *src_data;
int ret, buf_ndx;
+ u32 flags;
pipe = mdss_mdp_pipe_get_locked(req->id);
if (pipe == NULL) {
@@ -610,11 +640,13 @@
pr_debug("ov queue pnum=%d\n", pipe->num);
+ flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
+
buf_ndx = (pipe->play_cnt + 1) & 1; /* next buffer */
src_data = &pipe->buffers[buf_ndx];
mdss_mdp_overlay_free_buf(src_data);
- ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1);
+ ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret)) {
pr_err("src_data pmem error\n");
} else {
@@ -625,9 +657,6 @@
ctl = pipe->mixer->ctl;
mdss_mdp_pipe_unlock(pipe);
- if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL))
- ret = mdss_mdp_overlay_kickoff(ctl);
-
return ret;
}
@@ -638,14 +667,29 @@
pr_debug("play req id=%x\n", req->id);
- if (!mfd->panel_power_on)
- return -EPERM;
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
- if (req->id & MDSS_MDP_ROT_SESSION_MASK)
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
ret = mdss_mdp_overlay_rotate(mfd, req);
- else
+ } else {
ret = mdss_mdp_overlay_queue(mfd, req);
+ if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL)) {
+ mutex_unlock(&mfd->ov_lock);
+ ret = mdss_mdp_overlay_kickoff(mfd->ctl);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -719,10 +763,7 @@
u32 offset;
int bpp, ret;
- if (!mfd)
- return;
-
- if (!mfd->ctl || !mfd->panel_power_on)
+ if (!mfd || !mfd->ctl)
return;
fbi = mfd->fbi;
@@ -732,6 +773,14 @@
return;
}
+ if (mutex_lock_interruptible(&mfd->ov_lock))
+ return;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return;
+ }
+
memset(&data, 0, sizeof(data));
bpp = fbi->var.bits_per_pixel / 8;
@@ -782,6 +831,7 @@
return;
}
}
+ mutex_unlock(&mfd->ov_lock);
if (fbi->var.activate & FB_ACTIVATE_VBL)
mdss_mdp_overlay_kickoff(mfd->ctl);
@@ -853,9 +903,9 @@
}
ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
- mdss_get_iommu_domain(), 0,
- MDSS_MDP_CURSOR_SIZE, SZ_4K,
- 0, &(mfd->cursor_buf_iova));
+ mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
+ 0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
+ &(mfd->cursor_buf_iova));
if (IS_ERR_VALUE(ret)) {
dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
mfd->cursor_buf,
@@ -1076,10 +1126,36 @@
return ret;
}
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ rc = mdss_mdp_ctl_on(mfd);
+ if (rc == 0)
+ atomic_inc(&ov_active_panels);
+
+ return rc;
+}
+
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ mdss_mdp_overlay_release_all(mfd);
+
+ rc = mdss_mdp_ctl_off(mfd);
+ if (rc == 0) {
+ if (atomic_dec_return(&ov_active_panels) == 0)
+ mdss_mdp_rotator_release_all();
+ }
+
+ return rc;
+}
+
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
- mfd->on_fnc = mdss_mdp_ctl_on;
- mfd->off_fnc = mdss_mdp_ctl_off;
+ mfd->on_fnc = mdss_mdp_overlay_on;
+ mfd->off_fnc = mdss_mdp_overlay_off;
mfd->hw_refresh = true;
mfd->do_histogram = NULL;
mfd->overlay_play_enable = true;
@@ -1092,6 +1168,7 @@
INIT_LIST_HEAD(&mfd->pipes_used);
INIT_LIST_HEAD(&mfd->pipes_cleanup);
+ mutex_init(&mfd->ov_lock);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 3b04633..459cf14 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -268,9 +268,7 @@
atomic_read(&pipe->ref_cnt));
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- mutex_lock(&mdss_mdp_sspp_lock);
mdss_mdp_pipe_free(pipe);
- mutex_unlock(&mdss_mdp_sspp_lock);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return 0;
@@ -522,9 +520,13 @@
{
struct mdss_mdp_format_params *fmt;
u32 opmode, chroma_samp, unpack, src_format;
+ u32 secure = 0;
fmt = pipe->src_fmt;
+ if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ secure = 0xF;
+
opmode = pipe->bwc_mode;
if (pipe->flags & MDP_FLIP_LR)
opmode |= MDSS_MDP_OP_FLIP_LR;
@@ -571,6 +573,7 @@
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index dc1cb0d..1e58269 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,12 +187,17 @@
{
struct mdss_mdp_pipe *rot_pipe;
struct mdss_mdp_ctl *ctl;
- int ret;
+ int ret, need_wait = false;
- if (!rot)
+ ret = mutex_lock_interruptible(&rotator_lock);
+ if (ret)
+ return ret;
+
+ if (!rot || !rot->ref_cnt) {
+ mutex_unlock(&rotator_lock);
return -ENODEV;
+ }
- mutex_lock(&rotator_lock);
ret = mdss_mdp_rotator_pipe_dequeue(rot);
if (ret) {
pr_err("unable to acquire rotator\n");
@@ -207,7 +212,7 @@
if (rot->params_changed) {
rot->params_changed = 0;
- rot_pipe->flags = rot->rotations;
+ rot_pipe->flags = rot->flags;
rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
rot_pipe->img_width = rot->img_width;
rot_pipe->img_height = rot->img_height;
@@ -225,16 +230,18 @@
ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
+ if (ret == 0 && !rot->no_wait)
+ need_wait = true;
done:
mutex_unlock(&rotator_lock);
- if (!rot->no_wait)
+ if (need_wait)
mdss_mdp_rotator_busy_wait(rot);
return ret;
}
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
{
struct mdss_mdp_pipe *rot_pipe;
@@ -243,7 +250,6 @@
pr_debug("finish rot id=%x\n", rot->session_id);
- mutex_lock(&rotator_lock);
rot_pipe = rot->pipe;
if (rot_pipe) {
mdss_mdp_rotator_busy_wait(rot);
@@ -255,7 +261,43 @@
mdss_mdp_pipe_destroy(rot_pipe);
mdss_mdp_wb_mixer_destroy(mixer);
}
+
+ return 0;
+}
+
+int mdss_mdp_rotator_release(u32 ndx)
+{
+ struct mdss_mdp_rotator_session *rot;
+ mutex_lock(&rotator_lock);
+ rot = mdss_mdp_rotator_session_get(ndx);
+ if (rot) {
+ mdss_mdp_rotator_finish(rot);
+ } else {
+ pr_warn("unknown session id=%x\n", ndx);
+ return -ENOENT;
+ }
mutex_unlock(&rotator_lock);
return 0;
}
+
+int mdss_mdp_rotator_release_all(void)
+{
+ struct mdss_mdp_rotator_session *rot;
+ int i, cnt;
+
+ mutex_lock(&rotator_lock);
+ for (i = 0, cnt = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+ rot = &rotator_session[i];
+ if (rot->ref_cnt) {
+ mdss_mdp_rotator_finish(rot);
+ cnt++;
+ }
+ }
+ mutex_unlock(&rotator_lock);
+
+ if (cnt)
+ pr_debug("cleaned up %d rotator sessions\n", cnt);
+
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index eb5b47a..70ef6bf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
u32 params_changed;
u32 format;
- u32 rotations;
+ u32 flags;
u16 img_width;
u16 img_height;
@@ -48,7 +48,8 @@
int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
-int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release_all(void);
#endif /* MDSS_MDP_ROTATOR_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index ee9582a..9f2df85 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -316,9 +316,20 @@
} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr);
- if (is_mdss_iommu_attached())
+ if (is_mdss_iommu_attached()) {
+ int domain;
+ if (data->flags & MDP_SECURE_OVERLAY_SESSION)
+ domain = MDSS_IOMMU_DOMAIN_SECURE;
+ else
+ domain = MDSS_IOMMU_DOMAIN_UNSECURE;
ion_unmap_iommu(iclient, data->srcp_ihdl,
- mdss_get_iommu_domain(), 0);
+ mdss_get_iommu_domain(domain), 0);
+
+ if (domain == MDSS_IOMMU_DOMAIN_SECURE) {
+ msm_ion_unsecure_buffer(iclient,
+ data->srcp_ihdl);
+ }
+ }
ion_free(iclient, data->srcp_ihdl);
data->srcp_ihdl = NULL;
@@ -339,7 +350,7 @@
start = (unsigned long *) &data->addr;
len = (unsigned long *) &data->len;
- data->flags = img->flags;
+ data->flags |= img->flags;
data->p_need = 0;
if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -374,8 +385,24 @@
}
if (is_mdss_iommu_attached()) {
+ int domain;
+ if (data->flags & MDP_SECURE_OVERLAY_SESSION) {
+ domain = MDSS_IOMMU_DOMAIN_SECURE;
+ ret = msm_ion_secure_buffer(iclient,
+ data->srcp_ihdl, 0x2,
+ ION_UNSECURE_DELAYED);
+ if (IS_ERR_VALUE(ret)) {
+ ion_free(iclient, data->srcp_ihdl);
+ pr_err("failed to secure handle (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+ }
+
ret = ion_map_iommu(iclient, data->srcp_ihdl,
- mdss_get_iommu_domain(),
+ mdss_get_iommu_domain(domain),
0, SZ_4K, 0, start, len, 0,
ION_IOMMU_UNMAP_DELAYED);
} else {
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index b18efbe..b74523b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -96,8 +96,9 @@
ion_phys(iclient, ihdl, &mdss_wb_mem, &img_size);
if (is_mdss_iommu_attached()) {
+ int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
rc = ion_map_iommu(iclient, ihdl,
- mdss_get_iommu_domain(),
+ mdss_get_iommu_domain(domain),
0, SZ_4K, 0,
(unsigned long *) &img->addr,
(unsigned long *) &img->len,
@@ -569,6 +570,6 @@
int msm_fb_get_iommu_domain(void)
{
- return mdss_get_iommu_domain();
+ return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
}
EXPORT_SYMBOL(msm_fb_get_iommu_domain);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 5cdfe34..28d7051 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -55,6 +55,17 @@
MAX_PHYS_TARGET_NUM,
};
+enum mdss_intf_events {
+ MDSS_EVENT_RESET,
+ MDSS_EVENT_UNBLANK,
+ MDSS_EVENT_TIMEGEN_ON,
+ MDSS_EVENT_BLANK,
+ MDSS_EVENT_TIMEGEN_OFF,
+ MDSS_EVENT_CLOSE,
+ MDSS_EVENT_SUSPEND,
+ MDSS_EVENT_RESUME,
+};
+
/* panel info type */
struct lcd_panel_info {
u32 vsync_enable;
@@ -178,14 +189,11 @@
struct mdss_panel_data {
struct mdss_panel_info panel_info;
- void (*set_backlight) (struct mdss_panel_data *pdata,
- u32 bl_level);
- int (*intf_unprepare) (struct mdss_panel_data *pdata);
+ void (*set_backlight) (struct mdss_panel_data *pdata, u32 bl_level);
unsigned char *mmss_cc_base;
/* function entry chain */
- int (*on) (struct mdss_panel_data *pdata);
- int (*off) (struct mdss_panel_data *pdata);
+ int (*event_handler) (struct mdss_panel_data *pdata, int e, void *arg);
};
int mdss_register_panel(struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index d4c924f..c3dc06b 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,15 +25,10 @@
#include "mdss_panel.h"
-static int mdss_wb_on(struct mdss_panel_data *pdata)
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
{
- pr_debug("%s\n", __func__);
- return 0;
-}
-
-static int mdss_wb_off(struct mdss_panel_data *pdata)
-{
- pr_debug("%s\n", __func__);
+ pr_debug("%s: event=%d\n", __func__, event);
return 0;
}
@@ -75,8 +70,7 @@
pdata->panel_info.pdest = DISPLAY_3;
pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
- pdata->on = mdss_wb_on;
- pdata->off = mdss_wb_off;
+ pdata->event_handler = mdss_wb_event_handler;
pdev->dev.platform_data = pdata;
rc = mdss_register_panel(pdata);
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
new file mode 100644
index 0000000..6a63964
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -0,0 +1,1184 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/mhl_8334.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_io_util.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+
+#define pr_debug_intr(...) pr_debug("\n")
+
+/* GPIO roles used by this driver; indices into mhl_tx_platform_data.gpios[] */
+enum mhl_gpio_type {
+ MHL_TX_RESET_GPIO,
+ MHL_TX_INTR_GPIO,
+ MHL_TX_PMIC_PWR_GPIO,
+ MHL_TX_MAX_GPIO,
+};
+
+/* Regulator roles; indices into mhl_tx_platform_data.vregs[] */
+enum mhl_vreg_type {
+ MHL_TX_3V_VREG,
+ MHL_TX_MAX_VREG,
+};
+
+/* Platform resources parsed from the device tree (see mhl_tx_get_dt_data) */
+struct mhl_tx_platform_data {
+ /* Data filled from device tree nodes */
+ struct dss_gpio *gpios[MHL_TX_MAX_GPIO];
+ struct dss_vreg *vregs[MHL_TX_MAX_VREG];
+ int irq;
+};
+
+/* Per-device driver state, allocated in mhl_i2c_probe() */
+struct mhl_tx_ctrl {
+ struct platform_device *pdev;
+ struct mhl_tx_platform_data *pdata;
+ struct i2c_handle *i2c_handle_unused_doc; /* NOTE(review): see next line */
+ struct i2c_client *i2c_handle;
+ uint8_t cur_state; /* POWER_STATE_D0_MHL / POWER_STATE_D3, etc. */
+ uint8_t chip_rev_id;
+ int mhl_mode; /* 1 = MHL sink detected, 0 = non-MHL (USB) sink */
+};
+
+
+/* I2C slave addresses of the chip's register pages, indexed by TX_PAGE_* */
+uint8_t slave_addrs[MAX_PAGES] = {
+ DEV_PAGE_TPI_0 ,
+ DEV_PAGE_TX_L0_0 ,
+ DEV_PAGE_TX_L1_0 ,
+ DEV_PAGE_TX_2_0 ,
+ DEV_PAGE_TX_3_0 ,
+ DEV_PAGE_CBUS ,
+ DEV_PAGE_DDC_EDID ,
+ DEV_PAGE_DDC_SEGM ,
+};
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+ enum mhl_st_type to_mode);
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl,
+ uint8_t to_state);
+
+/*
+ * Read one byte from page slave_addrs[slave_addr_index] at reg_offset.
+ * Returns the byte value (0..255) on success, or the negative error
+ * code from mdss_i2c_byte_read() on failure.
+ */
+static int mhl_i2c_reg_read(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset)
+{
+ int rc = -1;
+ uint8_t buffer = 0;
+
+ rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+ reg_offset, &buffer);
+ if (rc) {
+  pr_err("%s: slave=%x, off=%x\n",
+   __func__, slave_addrs[slave_addr_index], reg_offset);
+  return rc;
+ }
+ return buffer;
+}
+
+
+/*
+ * Write one byte to page slave_addrs[slave_addr_index] at reg_offset.
+ * Returns 0 on success or a negative error from mdss_i2c_byte_write().
+ */
+static int mhl_i2c_reg_write(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset,
+ uint8_t value)
+{
+ return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+ reg_offset, &value);
+}
+
+/*
+ * Read-modify-write: clear `mask` bits and set (mask & val) bits.
+ * NOTE(review): a failed read returns a negative error code that is
+ * truncated into `temp` (uint8_t) and written back unchecked — an I2C
+ * error here silently corrupts the register; confirm this is acceptable.
+ */
+static void mhl_i2c_reg_modify(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset,
+ uint8_t mask, uint8_t val)
+{
+ uint8_t temp;
+
+ temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);
+ temp &= (~mask);
+ temp |= (mask & val);
+ mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
+
+
+/*
+ * mhl_tx_get_dt_data() - parse reset/power/interrupt GPIOs from the
+ * device-tree node into pdata->gpios[] (entries are devm-allocated).
+ *
+ * Returns 0 on success or a negative errno. Bug fix: the error label
+ * previously returned rc == 0 (success) on every failure path because
+ * rc was never set before the goto.
+ * NOTE(review): of_get_named_gpio() results are not validated here;
+ * gpio_is_valid() is checked later in mhl_gpio_config().
+ */
+static int mhl_tx_get_dt_data(struct device *dev,
+ struct mhl_tx_platform_data *pdata)
+{
+ int i, rc = 0;
+ struct device_node *of_node = NULL;
+ struct dss_gpio *temp_gpio = NULL;
+ i = 0;
+
+ if (!dev || !pdata) {
+  pr_err("%s: invalid input\n", __func__);
+  return -EINVAL;
+ }
+
+ of_node = dev->of_node;
+ if (!of_node) {
+  pr_err("%s: invalid of_node\n", __func__);
+  rc = -EINVAL;
+  goto error;
+ }
+
+ pr_debug("%s: id=%d\n", __func__, dev->id);
+
+ /* GPIOs */
+ temp_gpio = NULL;
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!(temp_gpio)) {
+  pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+  rc = -ENOMEM;
+  goto error;
+ }
+ /* RESET */
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-rst-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-rst-gpio");
+ pr_debug("%s: rst gpio=[%d]\n", __func__,
+  temp_gpio->gpio);
+ pdata->gpios[MHL_TX_RESET_GPIO] = temp_gpio;
+
+ /* PWR */
+ temp_gpio = NULL;
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!(temp_gpio)) {
+  pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+  rc = -ENOMEM;
+  goto error;
+ }
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-pwr-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-pwr-gpio");
+ pr_debug("%s: pmic gpio=[%d]\n", __func__,
+  temp_gpio->gpio);
+ pdata->gpios[MHL_TX_PMIC_PWR_GPIO] = temp_gpio;
+
+ /* INTR */
+ temp_gpio = NULL;
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!(temp_gpio)) {
+  pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+  rc = -ENOMEM;
+  goto error;
+ }
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-intr-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-intr-gpio");
+ pr_debug("%s: intr gpio=[%d]\n", __func__,
+  temp_gpio->gpio);
+ pdata->gpios[MHL_TX_INTR_GPIO] = temp_gpio;
+
+ return 0;
+error:
+ pr_err("%s: ret due to err\n", __func__);
+ for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+  if (pdata->gpios[i])
+   devm_kfree(dev, pdata->gpios[i]);
+ return rc;
+} /* mhl_tx_get_dt_data */
+
+/* Drive the chip RESET gpio to `on` (1 = released, 0 = asserted). */
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+ gpio_set_value(mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]->gpio,
+   on);
+ return 0;
+}
+
+/*
+ * Soft-reset the CBUS block and unmask the interrupt sources used by
+ * this driver (INTR1/4/5 and the CBUS1/CBUS2 interrupt groups).
+ */
+static void cbus_reset(struct i2c_client *client)
+{
+ uint8_t i;
+
+ /*
+ * REG_SRST: pulse the CBUS soft-reset bit (BIT3)
+ */
+ MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);
+ msleep(20);
+ MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);
+ /*
+ * REG_INTR1 and REG_INTR4
+ */
+ MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+ MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+  BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+ MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+ /* Unmask CBUS1 Intrs */
+ MHL_SII_CBUS_WR(0x0009,
+  BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+ /* Unmask CBUS2 Intrs */
+ MHL_SII_CBUS_WR(0x001F, BIT2 | BIT3);
+
+ for (i = 0; i < 4; i++) {
+  /*
+   * Enable WRITE_STAT interrupt for writes to
+   * all 4 MSC Status registers.
+   */
+  MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+  /*
+   * Enable SET_INT interrupt for writes to
+   * all 4 MSC Interrupt registers.
+   */
+  MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+ }
+ return;
+}
+
+/*
+ * Program the CBUS timing registers and populate the local Device
+ * Capability (DEVCAP) registers with this source's advertised values.
+ */
+static void init_cbus_regs(struct i2c_client *client)
+{
+ uint8_t regval;
+
+ /* Increase DDC translation layer timer*/
+ MHL_SII_CBUS_WR(0x0007, 0xF2);
+ /* Drive High Time */
+ MHL_SII_CBUS_WR(0x0036, 0x03);
+ /* Use programmed timing */
+ MHL_SII_CBUS_WR(0x0039, 0x30);
+ /* CBUS Drive Strength */
+ MHL_SII_CBUS_WR(0x0040, 0x03);
+ /*
+ * Write initial default settings
+ * to devcap regs: default settings
+ * (devcap register window starts at CBUS offset 0x0080)
+ */
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+ MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+  DEVCAP_VAL_VID_LINK_MODE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_AUD_LINK_MODE,
+  DEVCAP_VAL_AUD_LINK_MODE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+  DEVCAP_VAL_SCRATCHPAD_SIZE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_INT_STAT_SIZE,
+  DEVCAP_VAL_INT_STAT_SIZE);
+ MHL_SII_CBUS_WR(0x0080 |
+  DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+ /* Make bits 2,3 (initiator timeout) to 1,1
+ * for register CBUS_LINK_CONTROL_2
+ * REG_CBUS_LINK_CONTROL_2
+ */
+ regval = MHL_SII_CBUS_RD(0x0031);
+ regval = (regval | 0x0C);
+ /* REG_CBUS_LINK_CONTROL_2 */
+ MHL_SII_CBUS_WR(0x0031, regval);
+ /* REG_MSC_TIMEOUT_LIMIT */
+ MHL_SII_CBUS_WR(0x0022, 0x0F);
+ /* REG_CBUS_LINK_CONTROL_1 */
+ MHL_SII_CBUS_WR(0x0030, 0x01);
+ /* disallow vendor specific commands */
+ MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Configure the initial reg settings
+ */
+/*
+ * Full chip bring-up register sequence: core power, TX PLL/clock path,
+ * analog PLL calibration, discovery control, then CBUS reset + init.
+ * @mhl_disc_en: true enables MHL discovery, false disables it.
+ */
+static void mhl_init_reg_settings(struct i2c_client *client, bool mhl_disc_en)
+{
+ /*
+ * ============================================
+ * POWER UP
+ * ============================================
+ */
+
+ /* Power up 1.2V core */
+ MHL_SII_PAGE1_WR(0x003D, 0x3F);
+ /*
+ * Wait for the source power to be enabled
+ * before enabling pll clocks.
+ */
+ msleep(50);
+ /* Enable Tx PLL Clock */
+ MHL_SII_PAGE2_WR(0x0011, 0x01);
+ /* Enable Tx Clock Path and Equalizer */
+ MHL_SII_PAGE2_WR(0x0012, 0x11);
+ /* Tx Source Termination ON */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+ /* Enable 1X MHL Clock output */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xAC);
+ /* Tx Differential Driver Config */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xD9);
+ /* PLL Bandwidth Control */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x02);
+ /*
+ * ============================================
+ * Analog PLL Control
+ * ============================================
+ */
+ /* Enable Rx PLL clock */
+ MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL, 0x00);
+ MHL_SII_PAGE0_WR(0x00F8, 0x0C);
+ MHL_SII_PAGE0_WR(0x0085, 0x02);
+ MHL_SII_PAGE2_WR(0x0000, 0x00);
+ MHL_SII_PAGE2_WR(0x0013, 0x60);
+ /* PLL Cal ref sel */
+ MHL_SII_PAGE2_WR(0x0017, 0x03);
+ /* VCO Cal */
+ MHL_SII_PAGE2_WR(0x001A, 0x20);
+ /* Auto EQ */
+ MHL_SII_PAGE2_WR(0x0022, 0xE0);
+ MHL_SII_PAGE2_WR(0x0023, 0xC0);
+ MHL_SII_PAGE2_WR(0x0024, 0xA0);
+ MHL_SII_PAGE2_WR(0x0025, 0x80);
+ MHL_SII_PAGE2_WR(0x0026, 0x60);
+ MHL_SII_PAGE2_WR(0x0027, 0x40);
+ MHL_SII_PAGE2_WR(0x0028, 0x20);
+ MHL_SII_PAGE2_WR(0x0029, 0x00);
+ /* Rx PLL Bandwidth 4MHz */
+ MHL_SII_PAGE2_WR(0x0031, 0x0A);
+ /* Rx PLL Bandwidth value from I2C */
+ MHL_SII_PAGE2_WR(0x0045, 0x06);
+ MHL_SII_PAGE2_WR(0x004B, 0x06);
+ /* Manual zone control */
+ MHL_SII_PAGE2_WR(0x004C, 0xE0);
+ /* PLL Mode value */
+ MHL_SII_PAGE2_WR(0x004D, 0x00);
+ MHL_SII_PAGE0_WR(0x0008, 0x35);
+ /*
+ * Discovery Control and Status regs
+ * Setting De-glitch time to 50 ms (default)
+ * Switch Control Disabled
+ */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+ /* 1.8V CBUS VTH */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x55);
+ /* RGND and single Discovery attempt */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+ /* Ignore VBUS */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+
+ /* Enable CBUS Discovery */
+ if (mhl_disc_en) {
+  /* Enable MHL Discovery */
+  MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+  /* Pull-up resistance off for IDLE state */
+  MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0xA4);
+ } else {
+  /* Disable MHL Discovery */
+  MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+  MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+ }
+
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+ /* MHL CBUS Discovery - immediate comm. */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+
+ /* Enable Auto Soft RESET */
+ MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+ /* HDMI Transcode mode enable */
+ MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+ cbus_reset(client);
+ init_cbus_regs(client);
+}
+
+
+/*
+ * Transition the chip between power states.
+ * NOTE(review): only the D3 case updates mhl_ctrl->cur_state here; for
+ * D0_MHL the state is updated later by mhl_msm_connection() — confirm
+ * this split is intentional.
+ */
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ switch (to_mode) {
+ case POWER_STATE_D0_NO_MHL:
+  break;
+ case POWER_STATE_D0_MHL:
+  mhl_init_reg_settings(client, true);
+  /* REG_DISC_CTRL1 */
+  MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+  /* TPI_DEVICE_POWER_STATE_CTRL_REG */
+  mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+   0x00);
+  break;
+ case POWER_STATE_D3:
+  if (mhl_ctrl->cur_state == POWER_STATE_D3)
+   break;
+
+  /* Force HPD to 0 when not in MHL mode. */
+  mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+  /*
+   * Change TMDS termination to high impedance
+   * on disconnection.
+   */
+  MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+  msleep(50);
+  MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+  MHL_SII_PAGE3_MOD(0x003D, BIT0,
+   0x00);
+  mhl_ctrl->cur_state = POWER_STATE_D3;
+  break;
+ default:
+  break;
+ }
+}
+
+/*
+ * Drive the downstream HPD (hot-plug detect) indication:
+ * HPD_UP enables TMDS output and lets HPD float (follow the sink);
+ * any other value forces HPD low and disables TMDS output.
+ */
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+ if (to_state == HPD_UP) {
+  /*
+   * Drive HPD to UP state
+   *
+   * The below two reg configs combined
+   * enable TMDS output.
+   */
+
+  /* Enable TMDS on TMDS_CCTRL */
+  MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+
+  /*
+   * Set HPD_OUT_OVR_EN = HPD State
+   * EDID read and Un-force HPD (from low)
+   * propogate to src let HPD float by clearing
+   * HPD OUT OVRRD EN
+   */
+  MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0x00);
+ } else {
+  /*
+   * Drive HPD to DOWN state
+   * Disable TMDS Output on REG_TMDS_CCTRL
+   * Enable/Disable TMDS output (MHL TMDS output only)
+   */
+  MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4);
+  MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+ }
+ return;
+}
+
+/*
+ * MHL_EST interrupt handling: enter the D0/MHL power state, re-enable
+ * source termination and DDC timing, and keep discovery enabled so the
+ * RGND interrupt still fires.
+ * Bug fix: the early-return log message said "cur st not D0" while the
+ * guarded condition is exactly that the state already IS D0_MHL.
+ */
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t val;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ pr_debug("%s: cur st [0x%x]\n", __func__,
+  mhl_ctrl->cur_state);
+
+ if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+  /* Already in D0 - MHL power state */
+  pr_err("%s: already in D0 mhl mode\n", __func__);
+  return;
+ }
+ /* spin_lock_irqsave(&mhl_state_lock, flags); */
+ mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+ /* spin_unlock_irqrestore(&mhl_state_lock, flags); */
+
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+ MHL_SII_CBUS_WR(0x07, 0xF2);
+
+ /*
+  * Keep the discovery enabled. Need RGND interrupt
+  * Possibly chip disables discovery after MHL_EST??
+  * Need to re-enable here
+  */
+ val = MHL_SII_PAGE3_RD(0x10);
+ MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+ return;
+}
+
+/*
+ * Disconnect handling: disable TX termination, drop to D3, then
+ * re-run the init sequence so a new attach can be discovered.
+ */
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ /*
+ * MHL TX CTL1
+ * Disabling Tx termination
+ */
+ MHL_SII_PAGE3_WR(0x30, 0xD0);
+
+ switch_mode(mhl_ctrl, POWER_STATE_D3);
+ /*
+ * Only if MHL-USB handshake is not implemented
+ */
+ mhl_init_reg_settings(client, true);
+ return;
+}
+
+/*
+ * Read the RGND impedance range from DISC_STAT2 to classify the sink:
+ * value 0x02 indicates an MHL sink, anything else is treated as USB
+ * and the chip is dropped back to D3.
+ * Returns MHL_DISCOVERY_RESULT_MHL or MHL_DISCOVERY_RESULT_USB.
+ */
+static int mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t rgnd_imp;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ /* DISC STATUS REG 2 */
+ rgnd_imp = (mhl_i2c_reg_read(client,
+  TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
+ pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
+ if (0x02 == rgnd_imp) {
+  pr_debug("%s: mhl sink\n", __func__);
+  MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT0, BIT0);
+  mhl_ctrl->mhl_mode = 1;
+ } else {
+  pr_debug("%s: non-mhl sink\n", __func__);
+  mhl_ctrl->mhl_mode = 0;
+  MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT3, BIT3);
+  switch_mode(mhl_ctrl, POWER_STATE_D3);
+ }
+ return mhl_ctrl->mhl_mode ?
+  MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+/*
+ * CBUS-lockout recovery, step 1: disable discovery, force the USB ID
+ * switch open and force HPD low. Paired with release_usb_switch_open().
+ */
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /*disable discovery*/
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+ /* force USB ID switch to open*/
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+ /* force HPD to 0 when not in mhl mode. */
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+/*
+ * CBUS-lockout recovery, step 2: after a settle delay, release the
+ * forced-open USB ID switch and re-enable discovery.
+ */
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ msleep(50);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+/*
+ * SCDT (sync detect) status-change handler: if sync is present and the
+ * MHL FIFO reports over/underflow (INTR5 bits 2/3), clear the status
+ * and pulse a FIFO soft reset via REG_SRST.
+ */
+static void scdt_st_chg(struct i2c_client *client)
+{
+ uint8_t tmds_cstat;
+ uint8_t mhl_fifo_status;
+
+ /* tmds cstat */
+ tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+ pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+  tmds_cstat);
+
+ if (!(tmds_cstat & BIT1))
+  return;
+
+ mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+ pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+  mhl_fifo_status);
+ if (mhl_fifo_status & 0x0C) {
+  MHL_SII_REG_NAME_WR(REG_INTR5, 0x0C);
+  pr_debug("%s: mhl fifo rst\n", __func__);
+  MHL_SII_REG_NAME_WR(REG_SRST, 0x94);
+  MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+ }
+}
+
+
+/*
+ * Device-detection interrupt handler (INTR4): dispatches SCDT change,
+ * MHL_EST (connection), uUSB-A detection, disconnection, RGND-ready
+ * and CBUS-lockout events, then acknowledges the status bits.
+ */
+static void dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t status, reg ;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* INTR_STATUS4 */
+ status = MHL_SII_REG_NAME_RD(REG_INTR4);
+ pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
+ if ((0x00 == status) &&\
+   (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+  pr_err("%s: invalid intr\n", __func__);
+  return;
+ }
+
+ if (0xFF == status) {
+  /* 0xFF usually means the chip is unpowered / bus floating */
+  pr_debug("%s: invalid intr 0xff\n", __func__);
+  MHL_SII_REG_NAME_WR(REG_INTR4, status);
+  return;
+ }
+
+ if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+  pr_debug("%s: scdt intr\n", __func__);
+  scdt_st_chg(client);
+ }
+
+ if (status & BIT1)
+  pr_debug("mhl: int4 bit1 set\n");
+
+ /* mhl_est interrupt */
+ if (status & BIT2) {
+  pr_debug("%s: mhl_est st=%02X\n", __func__,
+   (int) status);
+  mhl_msm_connection(mhl_ctrl);
+ } else if (status & BIT3) {
+  pr_debug("%s: uUSB-a type dev detct\n", __func__);
+  MHL_SII_REG_NAME_WR(REG_DISC_STAT2, 0x80);
+  switch_mode(mhl_ctrl, POWER_STATE_D3);
+ }
+
+ if (status & BIT5) {
+  /* clr intr - reg int4 */
+  pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+   (int)status);
+  reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+  MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+  mhl_msm_disconnection(mhl_ctrl);
+ }
+
+ if ((mhl_ctrl->cur_state != POWER_STATE_D0_MHL) &&\
+  (status & BIT6)) {
+  /* rgnd rdy Intr */
+  pr_debug("%s: rgnd ready intr\n", __func__);
+  switch_mode(mhl_ctrl, POWER_STATE_D0_MHL);
+  mhl_msm_read_rgnd_int(mhl_ctrl);
+ }
+
+ /* Can't succeed at these in D3 */
+ if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&\
+  (status & BIT4)) {
+  /* cbus lockout interrupt?
+   * Hardware detection mechanism figures that
+   * CBUS line is latched and raises this intr
+   * where we force usb switch open and release
+   */
+  pr_warn("%s: cbus locked out!\n", __func__);
+  force_usb_switch_open(mhl_ctrl);
+  release_usb_switch_open(mhl_ctrl);
+ }
+ MHL_SII_REG_NAME_WR(REG_INTR4, status);
+
+ return;
+}
+
+/*
+ * Acknowledge INTR5 (FIFO under/overflow) — the 8334 auto-resets the
+ * FIFO in hardware, so this just clears the pending status.
+ */
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t intr_5_stat;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /*
+ * Clear INT 5
+ * INTR5 is related to FIFO underflow/overflow reset
+ * which is handled in 8334 by auto FIFO reset
+ */
+ intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+ MHL_SII_REG_NAME_WR(REG_INTR5, intr_5_stat);
+}
+
+
+/*
+ * INTR1 handler: on a downstream HPD status-change event, read the
+ * CBUS HPD status and, if the sink asserts HPD, drive our HPD up.
+ */
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t intr_1_stat;
+ uint8_t cbus_stat;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* INTR STATUS 1 */
+ intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+ if (!intr_1_stat)
+  return;
+
+ /* Clear interrupts */
+ MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+ if (BIT6 & intr_1_stat) {
+  /*
+   * HPD status change event is pending
+   * Read CBUS HPD status for this info
+   * MSC REQ ABRT REASON
+   */
+  cbus_stat = MHL_SII_CBUS_RD(0x0D);
+  if (BIT6 & cbus_stat)
+   mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ }
+ return;
+}
+
+/*
+ * Read-then-write-back every interrupt/status register (INTR1..5 and
+ * the full CBUS A0..F3 groups) to acknowledge anything still pending
+ * before leaving the ISR. The pr_debug_intr dumps are diagnostics only.
+ */
+static void clear_all_intrs(struct i2c_client *client)
+{
+ uint8_t regval = 0x00;
+
+ pr_debug_intr("********* exiting isr mask check ?? *************\n");
+ pr_debug_intr("int1 mask = %02X\n",
+  (int) MHL_SII_REG_NAME_RD(REG_INTR1));
+ pr_debug_intr("int3 mask = %02X\n",
+  (int) MHL_SII_PAGE0_RD(0x0077));
+ pr_debug_intr("int4 mask = %02X\n",
+  (int) MHL_SII_REG_NAME_RD(REG_INTR4));
+ pr_debug_intr("int5 mask = %02X\n",
+  (int) MHL_SII_REG_NAME_RD(REG_INTR5));
+ pr_debug_intr("cbus1 mask = %02X\n",
+  (int) MHL_SII_CBUS_RD(0x0009));
+ pr_debug_intr("cbus2 mask = %02X\n",
+  (int) MHL_SII_CBUS_RD(0x001F));
+ pr_debug_intr("********* end of isr mask check *************\n");
+
+ /* Interrupt registers are write-1-to-clear: echo each status back */
+ regval = MHL_SII_REG_NAME_RD(REG_INTR1);
+ pr_debug_intr("int1 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR1, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR2);
+ pr_debug_intr("int2 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR2, regval);
+
+ regval = MHL_SII_PAGE0_RD(0x0073);
+ pr_debug_intr("int3 st = %02X\n", (int)regval);
+ MHL_SII_PAGE0_WR(0x0073, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR4);
+ pr_debug_intr("int4 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR4, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR5);
+ pr_debug_intr("int5 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR5, regval);
+
+ regval = MHL_SII_CBUS_RD(0x0008);
+ pr_debug_intr("cbusInt st = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x0008, regval);
+
+ regval = MHL_SII_CBUS_RD(0x001E);
+ pr_debug_intr("CBUS intR_2: %d\n", (int)regval);
+ MHL_SII_CBUS_WR(0x001E, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A0);
+ pr_debug_intr("A0 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A1);
+ pr_debug_intr("A1 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A2);
+ pr_debug_intr("A2 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A3);
+ pr_debug_intr("A3 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B0);
+ pr_debug_intr("B0 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B1);
+ pr_debug_intr("B1 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B2);
+ pr_debug_intr("B2 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B3);
+ pr_debug_intr("B3 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E0);
+ pr_debug_intr("E0 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E1);
+ pr_debug_intr("E1 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E2);
+ pr_debug_intr("E2 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E3);
+ pr_debug_intr("E3 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F0);
+ pr_debug_intr("F0 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F1);
+ pr_debug_intr("F1 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F2);
+ pr_debug_intr("F2 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F3);
+ pr_debug_intr("F3 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F3, regval);
+ pr_debug_intr("********* end of exiting in isr *************\n");
+}
+
+
+/*
+ * Threaded IRQ handler (registered with IRQF_ONESHOT in probe).
+ * Always runs device detection; runs the misc/HPD handlers only when
+ * in D0-MHL; finally acknowledges every remaining interrupt source.
+ */
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+ struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+ pr_debug("%s: Getting Interrupts\n", __func__);
+
+ /*
+  * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+  * interrupts. In D3, we get only RGND
+  */
+ dev_detect_isr(mhl_ctrl);
+
+ pr_debug("%s: cur pwr state is [0x%x]\n",
+  __func__, mhl_ctrl->cur_state);
+ if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+  /*
+   * If dev_detect_isr() didn't move the tx to D3
+   * on disconnect, continue to check other
+   * interrupt sources.
+   */
+  mhl_misc_isr(mhl_ctrl);
+
+  /*
+   * Check for any peer messages for DCAP_CHG etc
+   * Dispatch to have the CBUS module working only
+   * once connected.
+   mhl_cbus_isr(mhl_ctrl);
+   */
+  mhl_hpd_stat_isr(mhl_ctrl);
+ }
+
+ clear_all_intrs(mhl_ctrl->i2c_handle);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Hard-reset the TX chip via its reset gpio, read the revision id into
+ * the local (NOTE(review): never stored to mhl_ctrl->chip_rev_id —
+ * confirm; dev_detect_isr reads that field) and run the full register
+ * init sequence. Always returns 0.
+ */
+static int mhl_tx_chip_init(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t chip_rev_id = 0x00;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* Reset the TX chip */
+ mhl_sii_reset_pin(mhl_ctrl, 0);
+ msleep(20);
+ mhl_sii_reset_pin(mhl_ctrl, 1);
+ /* TX PR-guide requires a 100 ms wait here */
+ msleep(100);
+
+ /* Read the chip rev ID */
+ chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+ pr_debug("MHL: chip rev ID read=[%x]\n", chip_rev_id);
+
+ /*
+  * Need to disable MHL discovery if
+  * MHL-USB handshake is implemented
+  */
+ mhl_init_reg_settings(client, true);
+ return 0;
+}
+
+/*
+ * mhl_sii_reg_config() - enable/disable the avcc_18 and avcc_12
+ * supplies. The regulator handles are fetched once and cached in
+ * statics; the requested enable/disable is applied on every call.
+ *
+ * Bug fixes vs. original:
+ *  - rc was returned uninitialized once both handles were cached
+ *    (the enable/disable code was inside the "!handle" blocks, so
+ *    on every call after the first nothing ran and garbage returned);
+ *  - disable requests after the first call were silently ignored for
+ *    the same reason;
+ *  - the avcc_18 error message named the wrong regulator
+ *    ("reg_8038_l20").
+ * Returns 0 on success or a negative errno.
+ */
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+ static struct regulator *reg_8941_l24;
+ static struct regulator *reg_8941_l02;
+ int rc = 0;
+
+ pr_debug("Inside %s\n", __func__);
+ if (!reg_8941_l24) {
+  reg_8941_l24 = regulator_get(&client->dev,
+   "avcc_18");
+  if (IS_ERR(reg_8941_l24)) {
+   pr_err("could not get reg_8941_l24, rc = %ld\n",
+    PTR_ERR(reg_8941_l24));
+   reg_8941_l24 = NULL;
+   return -ENODEV;
+  }
+ }
+
+ if (!reg_8941_l02) {
+  reg_8941_l02 = regulator_get(&client->dev,
+   "avcc_12");
+  if (IS_ERR(reg_8941_l02)) {
+   pr_err("could not get reg_8941_l02, rc = %ld\n",
+    PTR_ERR(reg_8941_l02));
+   reg_8941_l02 = NULL;
+   return -ENODEV;
+  }
+ }
+
+ if (enable)
+  rc = regulator_enable(reg_8941_l24);
+ else
+  rc = regulator_disable(reg_8941_l24);
+ if (rc) {
+  pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+   "avcc_1.8V", enable, rc);
+  return rc;
+ }
+ pr_debug("%s: vreg L24 %s\n",
+  __func__, (enable ? "enabled" : "disabled"));
+
+ if (enable)
+  rc = regulator_enable(reg_8941_l02);
+ else
+  rc = regulator_disable(reg_8941_l02);
+ if (rc) {
+  pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+   "avcc_1.2V", enable, rc);
+  return rc;
+ }
+ pr_debug("%s: vreg L02 %s\n",
+  __func__, (enable ? "enabled" : "disabled"));
+
+ return rc;
+}
+
+
+/*
+ * mhl_vreg_config() - power the MHL chip on/off: request and drive the
+ * PMIC power gpio, then switch the supplies via mhl_sii_reg_config().
+ * @on: 1 = power up, 0 = power down and free the gpio.
+ *
+ * Bug fix: on direction-set or regulator failure the requested
+ * pwr_gpio was leaked; it is now released before returning.
+ * Returns 0 on success or a negative errno.
+ */
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+ int ret;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+ pr_debug("%s\n", __func__);
+ if (on) {
+  ret = gpio_request(pwr_gpio,
+   mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+  if (ret < 0) {
+   pr_err("%s: mhl pwr gpio req failed: %d\n",
+    __func__, ret);
+   return ret;
+  }
+  ret = gpio_direction_output(pwr_gpio, 1);
+  if (ret < 0) {
+   pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+    __func__, ret);
+   gpio_free(pwr_gpio);
+   return ret;
+  }
+
+  ret = mhl_sii_reg_config(client, true);
+  if (ret) {
+   pr_err("%s: regulator enable failed\n", __func__);
+   gpio_free(pwr_gpio);
+   return -EINVAL;
+  }
+  pr_debug("%s: mhl sii power on successful\n", __func__);
+ } else {
+  pr_warn("%s: turning off pwr controls\n", __func__);
+  mhl_sii_reg_config(client, false);
+  gpio_free(pwr_gpio);
+ }
+ pr_debug("%s: successful\n", __func__);
+ return 0;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+/*
+ * Request and configure the reset (output, low) and interrupt (input)
+ * GPIOs, mapping the interrupt gpio to an IRQ number on the client.
+ * @on: 1 = acquire/configure, 0 = free both gpios.
+ * NOTE(review): if the intr gpio request fails after the reset gpio
+ * succeeded, the reset gpio is not freed — confirm callers tolerate
+ * this on probe failure.
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+ int ret;
+ struct dss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+ /* caused too many line spills */
+ temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+ temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+ if (on) {
+  if (gpio_is_valid(temp_reset_gpio->gpio)) {
+   ret = gpio_request(temp_reset_gpio->gpio,
+    temp_reset_gpio->gpio_name);
+   if (ret < 0) {
+    pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+     __func__, temp_reset_gpio->gpio, ret);
+    return -EBUSY;
+   }
+   ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+   if (ret < 0) {
+    pr_err("%s: set dirn rst failed: %d\n",
+     __func__, ret);
+    return -EBUSY;
+   }
+  }
+  if (gpio_is_valid(temp_intr_gpio->gpio)) {
+   ret = gpio_request(temp_intr_gpio->gpio,
+    temp_intr_gpio->gpio_name);
+   if (ret < 0) {
+    pr_err("%s: intr_gpio req failed: %d\n",
+     __func__, ret);
+    return -EBUSY;
+   }
+   ret = gpio_direction_input(temp_intr_gpio->gpio);
+   if (ret < 0) {
+    pr_err("%s: set dirn intr failed: %d\n",
+     __func__, ret);
+    return -EBUSY;
+   }
+   mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+    temp_intr_gpio->gpio);
+   pr_debug("%s: gpio_to_irq=%d\n",
+    __func__, mhl_ctrl->i2c_handle->irq);
+  }
+ } else {
+  pr_warn("%s: freeing gpios\n", __func__);
+  gpio_free(temp_intr_gpio->gpio);
+  gpio_free(temp_reset_gpio->gpio);
+ }
+ pr_debug("%s: successful\n", __func__);
+ return 0;
+}
+
+/*
+ * I2C probe: allocate driver state, parse DT resources, power up,
+ * configure gpios, init the chip and register the threaded ISR.
+ * NOTE(review): if the device has no of_node, pdata stays NULL and
+ * mhl_vreg_config() dereferences mhl_ctrl->pdata — confirm only DT
+ * platforms instantiate this driver.
+ * NOTE(review): failures after mhl_vreg_config/mhl_gpio_config succeed
+ * do not undo those steps (regulators/gpios stay acquired).
+ */
+static int mhl_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct mhl_tx_platform_data *pdata = NULL;
+ struct mhl_tx_ctrl *mhl_ctrl;
+
+ mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+ if (!mhl_ctrl) {
+  pr_err("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+  rc = -ENOMEM;
+  goto failed_no_mem;
+ }
+
+ if (client->dev.of_node) {
+  pdata = devm_kzalloc(&client->dev,
+   sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+  if (!pdata) {
+   dev_err(&client->dev, "Failed to allocate memory\n");
+   rc = -ENOMEM;
+   goto failed_no_mem;
+  }
+
+  rc = mhl_tx_get_dt_data(&client->dev, pdata);
+  if (rc) {
+   pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+    __func__, rc);
+   goto failed_dt_data;
+  }
+  mhl_ctrl->i2c_handle = client;
+  mhl_ctrl->pdata = pdata;
+  i2c_set_clientdata(client, mhl_ctrl);
+ }
+
+ /*
+  * Regulator init
+  */
+ rc = mhl_vreg_config(mhl_ctrl, 1);
+ if (rc) {
+  pr_err("%s: vreg init failed [%d]\n",
+   __func__, rc);
+  goto failed_probe;
+ }
+
+ /*
+  * GPIO init
+  */
+ rc = mhl_gpio_config(mhl_ctrl, 1);
+ if (rc) {
+  pr_err("%s: gpio init failed [%d]\n",
+   __func__, rc);
+  goto failed_probe;
+ }
+
+ /*
+  * Other initializations
+  * such tx specific
+  */
+ rc = mhl_tx_chip_init(mhl_ctrl);
+ if (rc) {
+  pr_err("%s: tx chip init failed [%d]\n",
+   __func__, rc);
+  goto failed_probe;
+ }
+
+ pr_debug("%s: IRQ from GPIO INTR = %d\n",
+  __func__, mhl_ctrl->i2c_handle->irq);
+ pr_debug("%s: Driver name = [%s]\n", __func__,
+  client->dev.driver->name);
+ rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+  &mhl_tx_isr,
+  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+  client->dev.driver->name, mhl_ctrl);
+ if (rc) {
+  pr_err("request_threaded_irq failed, status: %d\n",
+   rc);
+  goto failed_probe;
+ } else {
+  pr_debug("request_threaded_irq succeeded\n");
+ }
+ pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+ return 0;
+failed_probe:
+failed_dt_data:
+ if (pdata)
+  devm_kfree(&client->dev, pdata);
+failed_no_mem:
+ if (mhl_ctrl)
+  devm_kfree(&client->dev, mhl_ctrl);
+ pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+ return rc;
+}
+
+
+/*
+ * I2C remove: free the IRQ, release gpios, power down supplies and
+ * free the devm allocations. Returns 0, or -EINVAL if probe never
+ * attached client data.
+ */
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+ struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+ if (!mhl_ctrl) {
+  pr_warn("%s: i2c get client data failed\n", __func__);
+  return -EINVAL;
+ }
+
+ free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+ mhl_gpio_config(mhl_ctrl, 0);
+ mhl_vreg_config(mhl_ctrl, 0);
+ if (mhl_ctrl->pdata)
+  devm_kfree(&client->dev, mhl_ctrl->pdata);
+ devm_kfree(&client->dev, mhl_ctrl);
+ return 0;
+}
+
+/* I2C device id table: matches legacy (non-DT) board registrations */
+static struct i2c_device_id mhl_sii_i2c_id[] = {
+ { MHL_DRIVER_NAME, 0 },
+ { }
+};
+
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+/* Device-tree match table: "qcom,mhl-sii8334" */
+static struct of_device_id mhl_match_table[] = {
+ {.compatible = COMPATIBLE_NAME,},
+ { },
+};
+
+static struct i2c_driver mhl_sii_i2c_driver = {
+ .driver = {
+  .name = MHL_DRIVER_NAME,
+  .owner = THIS_MODULE,
+  .of_match_table = mhl_match_table,
+ },
+ .probe = mhl_i2c_probe,
+ .remove = mhl_i2c_remove,
+ .id_table = mhl_sii_i2c_id,
+};
+
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 69ca0a3..ecac82d 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -460,6 +460,12 @@
{
struct dcs_cmd_req cmdreq;
+ if (mipi_novatek_pdata &&
+ mipi_novatek_pdata->gpio_set_backlight) {
+ mipi_novatek_pdata->gpio_set_backlight(mfd->bl_level);
+ return;
+ }
+
if ((mipi_novatek_pdata->enable_wled_bl_ctrl)
&& (wled_trigger_initialized)) {
led_trigger_event(bkl_led_trigger, mfd->bl_level);
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5189b6d..993ec01 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -532,6 +532,7 @@
mfd->suspend.sw_refreshing_enable = mfd->sw_refreshing_enable;
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_on = mfd->panel_power_on;
+ mfd->suspend.op_suspend = true;
if (mfd->op_enable) {
ret =
@@ -597,6 +598,8 @@
MSM_FB_INFO("msm_fb_resume: can't turn on display!\n");
}
+ mfd->suspend.op_suspend = false;
+
return ret;
}
#endif
@@ -2918,17 +2921,9 @@
return ret;
}
-static int msmfb_overlay_commit(struct fb_info *info, unsigned long *argp)
+static int msmfb_overlay_commit(struct fb_info *info)
{
- int ret, ndx;
-
- ret = copy_from_user(&ndx, argp, sizeof(ndx));
- if (ret) {
- pr_err("%s: ioctl failed\n", __func__);
- return ret;
- }
-
- return mdp4_overlay_commit(info, ndx);
+ return mdp4_overlay_commit(info);
}
static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
@@ -3362,7 +3357,7 @@
break;
case MSMFB_OVERLAY_COMMIT:
down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_commit(info, argp);
+ ret = msmfb_overlay_commit(info);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_PLAY:
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 2fd25cc..9c4f3d3 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -53,6 +53,7 @@
boolean op_enable;
boolean sw_refreshing_enable;
boolean panel_power_on;
+ boolean op_suspend;
};
struct msmfb_writeback_data_list {
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index a9709fb..b84ae44 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -784,7 +784,11 @@
buf_pool->allocated--;
}
- memset(buf_entry, 0, sizeof(struct vcd_buffer_entry));
+ buf_entry->valid = buf_entry->allocated = buf_entry->in_use = 0;
+ buf_entry->alloc = buf_entry->virtual = buf_entry->physical = NULL;
+ buf_entry->sz = 0;
+ memset(&buf_entry->frame, 0, sizeof(struct vcd_frame_data));
+
buf_pool->validated--;
if (buf_pool->validated == 0)
vcd_free_buffer_pool_entries(buf_pool);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 98cce5b..29546b7 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -445,4 +445,5 @@
header-y += genlock.h
header-y += msm_audio_amrwb.h
header-y += coresight-stm.h
-header-y += ci-bridge-spi.h
\ No newline at end of file
+header-y += ci-bridge-spi.h
+header-y += msm_audio_amrwbplus.h
diff --git a/include/linux/mfd/pm8xxx/pm8921-bms.h b/include/linux/mfd/pm8xxx/pm8921-bms.h
index 82ec57d..6db6204 100644
--- a/include/linux/mfd/pm8xxx/pm8921-bms.h
+++ b/include/linux/mfd/pm8xxx/pm8921-bms.h
@@ -37,6 +37,10 @@
* is considered empty(mV)
* @enable_fcc_learning: if set the driver will learn full charge
* capacity of the battery upon end of charge
+ * @normal_voltage_calc_ms: The period of soc calculation in ms when battery
+ * voltage higher than cutoff voltage
+ * @low_voltage_calc_ms: The period of soc calculation in ms when battery
+ * voltage is near cutoff voltage
*/
struct pm8921_bms_platform_data {
struct pm8xxx_bms_core_data bms_cdata;
@@ -51,6 +55,8 @@
int ignore_shutdown_soc;
int adjust_soc_low_threshold;
int chg_term_ua;
+ int normal_voltage_calc_ms;
+ int low_voltage_calc_ms;
};
#if defined(CONFIG_PM8921_BMS) || defined(CONFIG_PM8921_BMS_MODULE)
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 4ad55f4..12094e2 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -96,10 +96,6 @@
* @get_batt_capacity_percent:
* a board specific function to return battery
* capacity. If null - a default one will be used
- * @dc_unplug_check: enables the reverse boosting fix for the DC_IN line
- * however, this should only be enabled for devices which
- * control the DC OVP FETs otherwise this option should
- * remain disabled
* @has_dc_supply: report DC online if this bit is set in board file
* @trkl_voltage: the trkl voltage in (mV) below which hw controlled
* trkl charging happens with linear charger
@@ -152,7 +148,6 @@
int64_t batt_id_min;
int64_t batt_id_max;
bool keep_btm_on_suspend;
- bool dc_unplug_check;
bool has_dc_supply;
int trkl_voltage;
int weak_voltage;
@@ -178,15 +173,6 @@
void pm8921_charger_vbus_draw(unsigned int mA);
int pm8921_charger_register_vbus_sn(void (*callback)(int));
void pm8921_charger_unregister_vbus_sn(void (*callback)(int));
-/**
- * pm8921_charger_enable -
- *
- * @enable: 1 means enable charging, 0 means disable
- *
- * Enable/Disable battery charging current, the device will still draw current
- * from the charging source
- */
-int pm8921_charger_enable(bool enable);
/**
* pm8921_is_usb_chg_plugged_in - is usb plugged in
@@ -312,10 +298,6 @@
static inline void pm8921_charger_unregister_vbus_sn(void (*callback)(int))
{
}
-static inline int pm8921_charger_enable(bool enable)
-{
- return -ENXIO;
-}
static inline int pm8921_is_usb_chg_plugged_in(void)
{
return -ENXIO;
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 2dea611..4e9e1ce 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -131,7 +131,7 @@
u32 bit_width; /* sit width 16,24,32 */
struct list_head wcd9xxx_ch_list; /* channel list */
u16 grph; /* slimbus group handle */
- u32 ch_mask;
+ unsigned long ch_mask;
wait_queue_head_t dai_wait;
};
diff --git a/include/linux/mfd/wcd9xxx/wcd9320_registers.h b/include/linux/mfd/wcd9xxx/wcd9320_registers.h
index 4b8626a..f9966be 100644
--- a/include/linux/mfd/wcd9xxx/wcd9320_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9320_registers.h
@@ -1334,9 +1334,16 @@
/* SLIMBUS Slave Registers */
#define TAIKO_SLIM_PGD_PORT_INT_EN0 (0x30)
-#define TAIKO_SLIM_PGD_PORT_INT_STATUS0 (0x34)
-#define TAIKO_SLIM_PGD_PORT_INT_CLR0 (0x38)
-#define TAIKO_SLIM_PGD_PORT_INT_SOURCE0 (0x60)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_0 (0x34)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_1 (0x35)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_0 (0x36)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_1 (0x37)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_0 (0x38)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_1 (0x39)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_0 (0x3A)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_1 (0x3B)
+#define TAIKO_SLIM_PGD_PORT_INT_RX_SOURCE0 (0x60)
+#define TAIKO_SLIM_PGD_PORT_INT_TX_SOURCE0 (0x70)
/* Macros for Packing Register Writes into a U32 */
#define TAIKO_PACKED_REG_SIZE sizeof(u32)
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index cb9d7fa..c9f57c5 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -130,4 +130,162 @@
DEV_PAGE_DDC_SEGM = (0x60),
};
+#define MHL_SII_PAGE0_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_L0, off)
+#define MHL_SII_PAGE0_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_L0, off, val)
+#define MHL_SII_PAGE0_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_L0, off, mask, val)
+
+
+#define MHL_SII_PAGE1_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_L1, off)
+#define MHL_SII_PAGE1_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_L1, off, val)
+#define MHL_SII_PAGE1_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_L1, off, mask, val)
+
+
+#define MHL_SII_PAGE2_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_2, off)
+#define MHL_SII_PAGE2_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_2, off, val)
+#define MHL_SII_PAGE2_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_2, off, mask, val)
+
+
+#define MHL_SII_PAGE3_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_3, off)
+#define MHL_SII_PAGE3_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_3, off, val)
+#define MHL_SII_PAGE3_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_3, off, mask, val)
+
+#define MHL_SII_CBUS_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_CBUS, off)
+#define MHL_SII_CBUS_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_CBUS, off, val)
+#define MHL_SII_CBUS_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_CBUS, off, mask, val)
+
+#define REG_SRST ((TX_PAGE_3 << 16) | 0x0000)
+#define REG_INTR1 ((TX_PAGE_L0 << 16) | 0x0071)
+#define REG_INTR1_MASK ((TX_PAGE_L0 << 16) | 0x0075)
+#define REG_INTR2 ((TX_PAGE_L0 << 16) | 0x0072)
+#define REG_TMDS_CCTRL ((TX_PAGE_L0 << 16) | 0x0080)
+
+#define REG_DISC_CTRL1 ((TX_PAGE_3 << 16) | 0x0010)
+#define REG_DISC_CTRL2 ((TX_PAGE_3 << 16) | 0x0011)
+#define REG_DISC_CTRL3 ((TX_PAGE_3 << 16) | 0x0012)
+#define REG_DISC_CTRL4 ((TX_PAGE_3 << 16) | 0x0013)
+#define REG_DISC_CTRL5 ((TX_PAGE_3 << 16) | 0x0014)
+#define REG_DISC_CTRL6 ((TX_PAGE_3 << 16) | 0x0015)
+#define REG_DISC_CTRL7 ((TX_PAGE_3 << 16) | 0x0016)
+#define REG_DISC_CTRL8 ((TX_PAGE_3 << 16) | 0x0017)
+#define REG_DISC_CTRL9 ((TX_PAGE_3 << 16) | 0x0018)
+#define REG_DISC_CTRL10 ((TX_PAGE_3 << 16) | 0x0019)
+#define REG_DISC_CTRL11 ((TX_PAGE_3 << 16) | 0x001A)
+#define REG_DISC_STAT ((TX_PAGE_3 << 16) | 0x001B)
+#define REG_DISC_STAT2 ((TX_PAGE_3 << 16) | 0x001C)
+
+#define REG_INT_CTRL ((TX_PAGE_3 << 16) | 0x0020)
+#define REG_INTR4 ((TX_PAGE_3 << 16) | 0x0021)
+#define REG_INTR4_MASK ((TX_PAGE_3 << 16) | 0x0022)
+#define REG_INTR5 ((TX_PAGE_3 << 16) | 0x0023)
+#define REG_INTR5_MASK ((TX_PAGE_3 << 16) | 0x0024)
+
+#define REG_MHLTX_CTL1 ((TX_PAGE_3 << 16) | 0x0030)
+#define REG_MHLTX_CTL2 ((TX_PAGE_3 << 16) | 0x0031)
+#define REG_MHLTX_CTL3 ((TX_PAGE_3 << 16) | 0x0032)
+#define REG_MHLTX_CTL4 ((TX_PAGE_3 << 16) | 0x0033)
+#define REG_MHLTX_CTL5 ((TX_PAGE_3 << 16) | 0x0034)
+#define REG_MHLTX_CTL6 ((TX_PAGE_3 << 16) | 0x0035)
+#define REG_MHLTX_CTL7 ((TX_PAGE_3 << 16) | 0x0036)
+#define REG_MHLTX_CTL8 ((TX_PAGE_3 << 16) | 0x0037)
+
+#define REG_TMDS_CSTAT ((TX_PAGE_3 << 16) | 0x0040)
+
+#define REG_CBUS_INTR_ENABLE ((TX_PAGE_CBUS << 16) | 0x0009)
+
+#define REG_DDC_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000B)
+#define REG_CBUS_BUS_STATUS ((TX_PAGE_CBUS << 16) | 0x000A)
+#define REG_PRI_XFR_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000D)
+#define REG_CBUS_PRI_FWR_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000E)
+#define REG_CBUS_PRI_START ((TX_PAGE_CBUS << 16) | 0x0012)
+#define REG_CBUS_PRI_ADDR_CMD ((TX_PAGE_CBUS << 16) | 0x0013)
+#define REG_CBUS_PRI_WR_DATA_1ST ((TX_PAGE_CBUS << 16) | 0x0014)
+#define REG_CBUS_PRI_WR_DATA_2ND ((TX_PAGE_CBUS << 16) | 0x0015)
+#define REG_CBUS_PRI_RD_DATA_1ST ((TX_PAGE_CBUS << 16) | 0x0016)
+#define REG_CBUS_PRI_RD_DATA_2ND ((TX_PAGE_CBUS << 16) | 0x0017)
+#define REG_CBUS_PRI_VS_CMD ((TX_PAGE_CBUS << 16) | 0x0018)
+#define REG_CBUS_PRI_VS_DATA ((TX_PAGE_CBUS << 16) | 0x0019)
+#define REG_CBUS_MSC_RETRY_INTERVAL ((TX_PAGE_CBUS << 16) | 0x001A)
+#define REG_CBUS_DDC_FAIL_LIMIT ((TX_PAGE_CBUS << 16) | 0x001C)
+#define REG_CBUS_MSC_FAIL_LIMIT ((TX_PAGE_CBUS << 16) | 0x001D)
+#define REG_CBUS_MSC_INT2_STATUS ((TX_PAGE_CBUS << 16) | 0x001E)
+#define REG_CBUS_MSC_INT2_ENABLE ((TX_PAGE_CBUS << 16) | 0x001F)
+#define REG_MSC_WRITE_BURST_LEN ((TX_PAGE_CBUS << 16) | 0x0020)
+#define REG_MSC_HEARTBEAT_CONTROL ((TX_PAGE_CBUS << 16) | 0x0021)
+#define REG_MSC_TIMEOUT_LIMIT ((TX_PAGE_CBUS << 16) | 0x0022)
+#define REG_CBUS_LINK_CONTROL_1 ((TX_PAGE_CBUS << 16) | 0x0030)
+#define REG_CBUS_LINK_CONTROL_2 ((TX_PAGE_CBUS << 16) | 0x0031)
+#define REG_CBUS_LINK_CONTROL_3 ((TX_PAGE_CBUS << 16) | 0x0032)
+#define REG_CBUS_LINK_CONTROL_4 ((TX_PAGE_CBUS << 16) | 0x0033)
+#define REG_CBUS_LINK_CONTROL_5 ((TX_PAGE_CBUS << 16) | 0x0034)
+#define REG_CBUS_LINK_CONTROL_6 ((TX_PAGE_CBUS << 16) | 0x0035)
+#define REG_CBUS_LINK_CONTROL_7 ((TX_PAGE_CBUS << 16) | 0x0036)
+#define REG_CBUS_LINK_STATUS_1 ((TX_PAGE_CBUS << 16) | 0x0037)
+#define REG_CBUS_LINK_STATUS_2 ((TX_PAGE_CBUS << 16) | 0x0038)
+#define REG_CBUS_LINK_CONTROL_8 ((TX_PAGE_CBUS << 16) | 0x0039)
+#define REG_CBUS_LINK_CONTROL_9 ((TX_PAGE_CBUS << 16) | 0x003A)
+#define REG_CBUS_LINK_CONTROL_10 ((TX_PAGE_CBUS << 16) | 0x003B)
+#define REG_CBUS_LINK_CONTROL_11 ((TX_PAGE_CBUS << 16) | 0x003C)
+#define REG_CBUS_LINK_CONTROL_12 ((TX_PAGE_CBUS << 16) | 0x003D)
+
+
+#define REG_CBUS_LINK_CTRL9_0 ((TX_PAGE_CBUS << 16) | 0x003A)
+#define REG_CBUS_LINK_CTRL9_1 ((TX_PAGE_CBUS << 16) | 0x00BA)
+
+#define REG_CBUS_DRV_STRENGTH_0 ((TX_PAGE_CBUS << 16) | 0x0040)
+#define REG_CBUS_DRV_STRENGTH_1 ((TX_PAGE_CBUS << 16) | 0x0041)
+#define REG_CBUS_ACK_CONTROL ((TX_PAGE_CBUS << 16) | 0x0042)
+#define REG_CBUS_CAL_CONTROL ((TX_PAGE_CBUS << 16) | 0x0043)
+
+#define REG_CBUS_SCRATCHPAD_0 ((TX_PAGE_CBUS << 16) | 0x00C0)
+#define REG_CBUS_DEVICE_CAP_0 ((TX_PAGE_CBUS << 16) | 0x0080)
+#define REG_CBUS_DEVICE_CAP_1 ((TX_PAGE_CBUS << 16) | 0x0081)
+#define REG_CBUS_DEVICE_CAP_2 ((TX_PAGE_CBUS << 16) | 0x0082)
+#define REG_CBUS_DEVICE_CAP_3 ((TX_PAGE_CBUS << 16) | 0x0083)
+#define REG_CBUS_DEVICE_CAP_4 ((TX_PAGE_CBUS << 16) | 0x0084)
+#define REG_CBUS_DEVICE_CAP_5 ((TX_PAGE_CBUS << 16) | 0x0085)
+#define REG_CBUS_DEVICE_CAP_6 ((TX_PAGE_CBUS << 16) | 0x0086)
+#define REG_CBUS_DEVICE_CAP_7 ((TX_PAGE_CBUS << 16) | 0x0087)
+#define REG_CBUS_DEVICE_CAP_8 ((TX_PAGE_CBUS << 16) | 0x0088)
+#define REG_CBUS_DEVICE_CAP_9 ((TX_PAGE_CBUS << 16) | 0x0089)
+#define REG_CBUS_DEVICE_CAP_A ((TX_PAGE_CBUS << 16) | 0x008A)
+#define REG_CBUS_DEVICE_CAP_B ((TX_PAGE_CBUS << 16) | 0x008B)
+#define REG_CBUS_DEVICE_CAP_C ((TX_PAGE_CBUS << 16) | 0x008C)
+#define REG_CBUS_DEVICE_CAP_D ((TX_PAGE_CBUS << 16) | 0x008D)
+#define REG_CBUS_DEVICE_CAP_E ((TX_PAGE_CBUS << 16) | 0x008E)
+#define REG_CBUS_DEVICE_CAP_F ((TX_PAGE_CBUS << 16) | 0x008F)
+#define REG_CBUS_SET_INT_0 ((TX_PAGE_CBUS << 16) | 0x00A0)
+#define REG_CBUS_SET_INT_1 ((TX_PAGE_CBUS << 16) | 0x00A1)
+#define REG_CBUS_SET_INT_2 ((TX_PAGE_CBUS << 16) | 0x00A2)
+#define REG_CBUS_SET_INT_3 ((TX_PAGE_CBUS << 16) | 0x00A3)
+#define REG_CBUS_WRITE_STAT_0 ((TX_PAGE_CBUS << 16) | 0x00B0)
+#define REG_CBUS_WRITE_STAT_1 ((TX_PAGE_CBUS << 16) | 0x00B1)
+#define REG_CBUS_WRITE_STAT_2 ((TX_PAGE_CBUS << 16) | 0x00B2)
+#define REG_CBUS_WRITE_STAT_3 ((TX_PAGE_CBUS << 16) | 0x00B3)
+
+#define GET_PAGE(x) (x >> 16)
+#define GET_OFF(x) (x & 0xffff)
+
+
+#define MHL_SII_REG_NAME_RD(arg)\
+ mhl_i2c_reg_read(client, GET_PAGE(arg), GET_OFF(arg))
+#define MHL_SII_REG_NAME_WR(arg, val)\
+ mhl_i2c_reg_write(client, GET_PAGE(arg), GET_OFF(arg), val)
+#define MHL_SII_REG_NAME_MOD(arg, mask, val)\
+ mhl_i2c_reg_modify(client, GET_PAGE(arg), GET_OFF(arg), mask, val)
+
#endif /* __MHL_MSM_H__ */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f435221..c798cf9 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -468,6 +468,14 @@
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50);
+}
+
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);
diff --git a/include/linux/msm_audio_amrwbplus.h b/include/linux/msm_audio_amrwbplus.h
new file mode 100644
index 0000000..aa17117
--- /dev/null
+++ b/include/linux/msm_audio_amrwbplus.h
@@ -0,0 +1,18 @@
+#ifndef __MSM_AUDIO_AMR_WB_PLUS_H
+#define __MSM_AUDIO_AMR_WB_PLUS_H
+
+#define AUDIO_GET_AMRWBPLUS_CONFIG_V2 _IOR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+2), struct msm_audio_amrwbplus_config_v2)
+#define AUDIO_SET_AMRWBPLUS_CONFIG_V2 _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_amrwbplus_config_v2)
+
+struct msm_audio_amrwbplus_config_v2 {
+ unsigned int size_bytes;
+ unsigned int version;
+ unsigned int num_channels;
+ unsigned int amr_band_mode;
+ unsigned int amr_dtx_mode;
+ unsigned int amr_frame_fmt;
+ unsigned int amr_lsf_idx;
+};
+#endif /* __MSM_AUDIO_AMR_WB_PLUS_H */
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
new file mode 100644
index 0000000..613cd9f
--- /dev/null
+++ b/include/linux/msm_ipa.h
@@ -0,0 +1,714 @@
+#ifndef _MSM_IPA_H_
+#define _MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_NAT_DMA 24
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_GET_ASYNC_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_MAX 29
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 20
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS (1ul << 0)
+#define IPA_FLT_PROTOCOL (1ul << 1)
+#define IPA_FLT_SRC_ADDR (1ul << 2)
+#define IPA_FLT_DST_ADDR (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE (1ul << 6)
+#define IPA_FLT_CODE (1ul << 7)
+#define IPA_FLT_SPI (1ul << 8)
+#define IPA_FLT_SRC_PORT (1ul << 9)
+#define IPA_FLT_DST_PORT (1ul << 10)
+#define IPA_FLT_TC (1ul << 11)
+#define IPA_FLT_FLOW_LABEL (1ul << 12)
+#define IPA_FLT_NEXT_HDR (1ul << 13)
+#define IPA_FLT_META_DATA (1ul << 14)
+#define IPA_FLT_FRAGMENT (1ul << 15)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, for e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+ IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC2_PROD,
+ IPA_CLIENT_HSIC3_PROD,
+ IPA_CLIENT_HSIC4_PROD,
+ IPA_CLIENT_HSIC5_PROD,
+ IPA_CLIENT_USB_PROD,
+ IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+ IPA_CLIENT_A2_EMBEDDED_PROD,
+ IPA_CLIENT_A2_TETHERED_PROD,
+ IPA_CLIENT_A5_LAN_WAN_PROD,
+ IPA_CLIENT_A5_CMD_PROD,
+ IPA_CLIENT_Q6_LAN_PROD,
+
+ IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC2_CONS,
+ IPA_CLIENT_HSIC3_CONS,
+ IPA_CLIENT_HSIC4_CONS,
+ IPA_CLIENT_HSIC5_CONS,
+ IPA_CLIENT_USB_CONS,
+ IPA_CLIENT_A2_EMBEDDED_CONS,
+ IPA_CLIENT_A2_TETHERED_CONS,
+ IPA_CLIENT_A5_LAN_WAN_CONS,
+ IPA_CLIENT_Q6_LAN_CONS,
+
+ IPA_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+ IPA_IP_v4,
+ IPA_IP_v6,
+ IPA_IP_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., A5): 5'd3
+ */
+enum ipa_flt_action {
+ IPA_PASS_TO_ROUTING,
+ IPA_PASS_TO_SRC_NAT,
+ IPA_PASS_TO_DST_NAT,
+ IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+ uint32_t attrib_mask;
+ uint16_t src_port_lo;
+ uint16_t src_port_hi;
+ uint16_t dst_port_lo;
+ uint16_t dst_port_hi;
+ uint8_t type;
+ uint8_t code;
+ uint32_t spi;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint32_t meta_data;
+ uint32_t meta_data_mask;
+ union {
+ struct {
+ uint8_t tos;
+ uint8_t protocol;
+ uint32_t src_addr;
+ uint32_t src_addr_mask;
+ uint32_t dst_addr;
+ uint32_t dst_addr_mask;
+ } v4;
+ struct {
+ uint8_t tc;
+ uint32_t flow_label;
+ uint8_t next_hdr;
+ uint32_t src_addr[4];
+ uint32_t src_addr_mask[4];
+ uint32_t dst_addr[4];
+ uint32_t dst_addr_mask[4];
+ } v6;
+ } u;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ */
+struct ipa_flt_rule {
+ enum ipa_flt_action action;
+ uint32_t rt_tbl_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ it is not an index or an offset
+ * @attrib: attributes of the rule
+ */
+struct ipa_rt_rule {
+ enum ipa_client_type dst;
+ uint32_t hdr_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status: out parameter, status of header add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_add {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+ uint32_t hdr_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr: all headers need to go here back to
+ * back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+ uint8_t commit;
+ uint8_t num_hdrs;
+ struct ipa_hdr_add hdr[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr: out parameter, contents of specified header,
+ * valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ * valid only when ioctl return val is non-negative
+ * @is_partial: out parameter, indicates whether specified header is partial
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_copy_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters, if lookup was
+ * successful caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl: out parameter, handle of header entry
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status: out parameter, status of header remove operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+ uint8_t commit;
+ uint8_t num_hdls;
+ struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table, it is NOT possible to add rules at
+ * the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_add {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status: output parameter, status of route rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of filtering rule add operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+ struct ipa_flt_rule rule;
+ uint8_t at_rear;
+ uint32_t flt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep: which "clients" pipe does this rule apply to?
+ * valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ enum ipa_client_type ep;
+ uint8_t global;
+ uint8_t num_rules;
+ struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status: output parameter, status of filtering rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_flt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters, if lookup was
+ * successful caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl: output parameter, handle of routing table, valid only when ioctl
+ * return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+ enum ipa_ip_type ip;
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props: output parameter, number of tx properties
+ * valid only when ioctl return val is non-negative
+ * @num_rx_props: output parameter, number of rx properties
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_query_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_tx_props;
+ uint32_t num_rx_props;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type dst_pipe;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @tx[0]: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ */
+struct ipa_ioc_rx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type src_pipe;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ size_t size;
+ off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+ uint8_t tbl_index;
+ uint32_t ipv4_rules_offset;
+ uint32_t expn_rules_offset;
+
+ uint32_t index_offset;
+ uint32_t index_expn_offset;
+
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+ uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+ uint8_t table_index;
+ uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr: type of table, from which the base address of the table
+ * can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+ uint8_t table_index;
+ uint8_t base_addr;
+
+ uint32_t offset;
+ uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+ uint8_t entries;
+ struct ipa_ioc_nat_dma_one dma[0];
+
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @msg_len: the length of the message in bytes
+ * @rsvd: reserved bits for future use.
+ *
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * buffer of at least this size in a continuous loop, call will block when there
+ * is no pending async message.
+ *
+ * After reading a message's meta-data using above scheme, client should issue a
+ * GET_MSG IOCTL to actually read the message itself into the buffer of
+ * "msg_len" immediately following the ipa_msg_meta itself in the IOCTL payload
+ */
+struct ipa_msg_meta {
+ uint8_t msg_type;
+ uint16_t msg_len;
+ uint8_t rsvd;
+};
+
+/**
+ * actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_RT_TBL, \
+ uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_HDR, \
+ uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_SET_FLT, \
+ uint32_t)
+#define IPA_IOC_GET_ASYNC_MSG _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_ASYNC_MSG, \
+ struct ipa_msg_meta *)
+
+#endif /* _MSM_IPA_H_ */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 98050ce..d3f6792 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -71,7 +71,7 @@
#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
#define MSMFB_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
#define MSMFB_METADATA_SET _IOW(MSMFB_IOCTL_MAGIC, 162, struct msmfb_metadata)
-#define MSMFB_OVERLAY_COMMIT _IOW(MSMFB_IOCTL_MAGIC, 163, unsigned int)
+#define MSMFB_OVERLAY_COMMIT _IO(MSMFB_IOCTL_MAGIC, 163)
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 077ccfc..fc34b22 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -402,8 +402,11 @@
};
/**
- * enum qpnp_adc_meas_timer - Selects the measurement interval time.
+ * enum qpnp_adc_meas_timer_1 - Selects the measurement interval time.
* If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * The timer period is used by the USB_ID. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
* %ADC_MEAS_INTERVAL_0MS : 0ms
* %ADC_MEAS_INTERVAL_1P0MS : 1ms
* %ADC_MEAS_INTERVAL_2P0MS : 2ms
@@ -421,24 +424,126 @@
* %ADC_MEAS_INTERVAL_8S : 8seconds
* %ADC_MEAS_INTERVAL_16S: 16seconds
*/
-enum qpnp_adc_meas_timer {
- ADC_MEAS_INTERVAL_0MS = 0,
- ADC_MEAS_INTERVAL_1P0MS,
- ADC_MEAS_INTERVAL_2P0MS,
- ADC_MEAS_INTERVAL_3P9MS,
- ADC_MEAS_INTERVAL_7P8MS,
- ADC_MEAS_INTERVAL_15P6MS,
- ADC_MEAS_INTERVAL_31P3MS,
- ADC_MEAS_INTERVAL_62P5MS,
- ADC_MEAS_INTERVAL_125MS,
- ADC_MEAS_INTERVAL_250MS,
- ADC_MEAS_INTERVAL_500MS,
- ADC_MEAS_INTERVAL_1S,
- ADC_MEAS_INTERVAL_2S,
- ADC_MEAS_INTERVAL_4S,
- ADC_MEAS_INTERVAL_8S,
- ADC_MEAS_INTERVAL_16S,
- ADC_MEAS_INTERVAL_NONE,
+enum qpnp_adc_meas_timer_1 {
+ ADC_MEAS1_INTERVAL_0MS = 0,
+ ADC_MEAS1_INTERVAL_1P0MS,
+ ADC_MEAS1_INTERVAL_2P0MS,
+ ADC_MEAS1_INTERVAL_3P9MS,
+ ADC_MEAS1_INTERVAL_7P8MS,
+ ADC_MEAS1_INTERVAL_15P6MS,
+ ADC_MEAS1_INTERVAL_31P3MS,
+ ADC_MEAS1_INTERVAL_62P5MS,
+ ADC_MEAS1_INTERVAL_125MS,
+ ADC_MEAS1_INTERVAL_250MS,
+ ADC_MEAS1_INTERVAL_500MS,
+ ADC_MEAS1_INTERVAL_1S,
+ ADC_MEAS1_INTERVAL_2S,
+ ADC_MEAS1_INTERVAL_4S,
+ ADC_MEAS1_INTERVAL_8S,
+ ADC_MEAS1_INTERVAL_16S,
+ ADC_MEAS1_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_2 - Selects the measurement interval time.
+ * If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * The timer period is used by the batt_therm. Do not set a polling rate
+ * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0
+ * appears to be limited to 1 second.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_100MS : 100ms
+ * %ADC_MEAS_INTERVAL_200MS : 200ms
+ * %ADC_MEAS_INTERVAL_300MS : 300ms
+ * %ADC_MEAS_INTERVAL_400MS : 400ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_600MS : 600ms
+ * %ADC_MEAS_INTERVAL_700MS : 700ms
+ * %ADC_MEAS_INTERVAL_800MS : 800ms
+ * %ADC_MEAS_INTERVAL_900MS : 900ms
+ * %ADC_MEAS_INTERVAL_1S: 1seconds
+ * %ADC_MEAS_INTERVAL_1P1S: 1.1seconds
+ * %ADC_MEAS_INTERVAL_1P2S: 1.2seconds
+ * %ADC_MEAS_INTERVAL_1P3S: 1.3seconds
+ * %ADC_MEAS_INTERVAL_1P4S: 1.4seconds
+ * %ADC_MEAS_INTERVAL_1P5S: 1.5seconds
+ */
+enum qpnp_adc_meas_timer_2 {
+ ADC_MEAS2_INTERVAL_0MS = 0,
+ ADC_MEAS2_INTERVAL_100MS,
+ ADC_MEAS2_INTERVAL_200MS,
+ ADC_MEAS2_INTERVAL_300MS,
+ ADC_MEAS2_INTERVAL_400MS,
+ ADC_MEAS2_INTERVAL_500MS,
+ ADC_MEAS2_INTERVAL_600MS,
+ ADC_MEAS2_INTERVAL_700MS,
+ ADC_MEAS2_INTERVAL_800MS,
+ ADC_MEAS2_INTERVAL_900MS,
+ ADC_MEAS2_INTERVAL_1S,
+ ADC_MEAS2_INTERVAL_1P1S,
+ ADC_MEAS2_INTERVAL_1P2S,
+ ADC_MEAS2_INTERVAL_1P3S,
+ ADC_MEAS2_INTERVAL_1P4S,
+ ADC_MEAS2_INTERVAL_1P5S,
+ ADC_MEAS2_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_3 - Selects the measurement interval time.
+ * If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * Do not set a polling rate greater than 1 second on PMIC 2.0.
+ * The max polling rate on the PMIC 2.0 appears to be limited to 1 second.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1S : 1seconds
+ * %ADC_MEAS_INTERVAL_2S : 2seconds
+ * %ADC_MEAS_INTERVAL_3S : 3seconds
+ * %ADC_MEAS_INTERVAL_4S : 4seconds
+ * %ADC_MEAS_INTERVAL_5S : 5seconds
+ * %ADC_MEAS_INTERVAL_6S: 6seconds
+ * %ADC_MEAS_INTERVAL_7S : 7seconds
+ * %ADC_MEAS_INTERVAL_8S : 8seconds
+ * %ADC_MEAS_INTERVAL_9S : 9seconds
+ * %ADC_MEAS_INTERVAL_10S : 10seconds
+ * %ADC_MEAS_INTERVAL_11S : 11seconds
+ * %ADC_MEAS_INTERVAL_12S : 12seconds
+ * %ADC_MEAS_INTERVAL_13S : 13seconds
+ * %ADC_MEAS_INTERVAL_14S : 14seconds
+ * %ADC_MEAS_INTERVAL_15S : 15seconds
+ */
+enum qpnp_adc_meas_timer_3 {
+ ADC_MEAS3_INTERVAL_0S = 0,
+ ADC_MEAS3_INTERVAL_1S,
+ ADC_MEAS3_INTERVAL_2S,
+ ADC_MEAS3_INTERVAL_3S,
+ ADC_MEAS3_INTERVAL_4S,
+ ADC_MEAS3_INTERVAL_5S,
+ ADC_MEAS3_INTERVAL_6S,
+ ADC_MEAS3_INTERVAL_7S,
+ ADC_MEAS3_INTERVAL_8S,
+ ADC_MEAS3_INTERVAL_9S,
+ ADC_MEAS3_INTERVAL_10S,
+ ADC_MEAS3_INTERVAL_11S,
+ ADC_MEAS3_INTERVAL_12S,
+ ADC_MEAS3_INTERVAL_13S,
+ ADC_MEAS3_INTERVAL_14S,
+ ADC_MEAS3_INTERVAL_15S,
+ ADC_MEAS3_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_select - Selects the timer for which
+ * the appropriate polling frequency is set.
+ * %ADC_MEAS_TIMER_SELECT1 - Select this timer if the client is USB_ID.
+ * %ADC_MEAS_TIMER_SELECT2 - Select this timer if the client is batt_therm.
+ * %ADC_MEAS_TIMER_SELECT3 - The timer is added only for completion. It is
+ * not used by kernel space clients and user space clients cannot set
+ * the polling frequency. The driver will set an appropriate polling
+ * frequency to measure the user space clients from qpnp_adc_meas_timer_3.
+ */
+enum qpnp_adc_meas_timer_select {
+ ADC_MEAS_TIMER_SELECT1 = 0,
+ ADC_MEAS_TIMER_SELECT2,
+ ADC_MEAS_TIMER_SELECT3,
+ ADC_MEAS_TIMER_NUM,
};
/**
@@ -455,6 +560,134 @@
};
/**
+ * Channel selection registers for each of the 5 configurable measurements
+ * Channels allotment is fixed for the given channels below.
+ * The USB_ID and BATT_THERM channels are used only by the kernel space
+ * USB and Battery drivers.
+ * The other 3 channels are configurable for use by userspace clients.
+ * USB_ID uses QPNP_ADC_TM_M0_ADC_CH_SEL_CTL
+ * BATT_TEMP uses QPNP_ADC_TM_M1_ADC_CH_SEL_CTL
+ * PA_THERM1 uses QPNP_ADC_TM_M2_ADC_CH_SEL_CTL
+ * PA_THERM2 uses QPNP_ADC_TM_M3_ADC_CH_SEL_CTL
+ * EMMC_THERM uses QPNP_ADC_TM_M4_ADC_CH_SEL_CTL
+ */
+enum qpnp_adc_tm_channel_select {
+ QPNP_ADC_TM_M0_ADC_CH_SEL_CTL = 0x48,
+ QPNP_ADC_TM_M1_ADC_CH_SEL_CTL = 0x68,
+ QPNP_ADC_TM_M2_ADC_CH_SEL_CTL = 0x70,
+ QPNP_ADC_TM_M3_ADC_CH_SEL_CTL = 0x78,
+ QPNP_ADC_TM_M4_ADC_CH_SEL_CTL = 0x80,
+ QPNP_ADC_TM_CH_SELECT_NONE
+};
+
+/**
+ * struct qpnp_adc_tm_config - Represent ADC Thermal Monitor configuration.
+ * @channel: ADC channel for which thermal monitoring is requested.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to the
+ * ADC reference.
+ * @high_thr_temp: Temperature at which high threshold notification is required.
+ * @low_thr_temp: Temperature at which low threshold notification is required.
+ * @low_thr_voltage : Low threshold voltage ADC code used for reverse
+ * calibration.
+ * @high_thr_voltage: High threshold voltage ADC code used for reverse
+ * calibration.
+ */
+struct qpnp_adc_tm_config {
+ int channel;
+ int adc_code;
+ int high_thr_temp;
+ int low_thr_temp;
+ int64_t high_thr_voltage;
+ int64_t low_thr_voltage;
+};
+
+/**
+ * enum qpnp_adc_tm_trip_type - Type for setting high/low temperature/voltage.
+ * %ADC_TM_TRIP_HIGH_WARM: Setting high temperature. Note that high temperature
+ * corresponds to low voltage. Driver handles this case
+ * appropriately to set the corresponding high/low voltage
+ * thresholds.
+ * %ADC_TM_TRIP_LOW_COOL: Setting low temperature.
+ */
+enum qpnp_adc_tm_trip_type {
+ ADC_TM_TRIP_HIGH_WARM = 0,
+ ADC_TM_TRIP_LOW_COOL,
+ ADC_TM_TRIP_NUM,
+};
+
+/**
+ * enum qpnp_tm_state - This lets the client know whether the threshold
+ * that was crossed was high/low.
+ * %ADC_TM_HIGH_STATE: Client is notified of crossing the requested high
+ * threshold.
+ * %ADC_TM_LOW_STATE: Client is notified of crossing the requested low
+ * threshold.
+ */
+enum qpnp_tm_state {
+ ADC_TM_HIGH_STATE = 0,
+ ADC_TM_LOW_STATE,
+ ADC_TM_STATE_NUM,
+};
+
+/**
+ * enum qpnp_state_request - Request to enable/disable the corresponding
+ * high/low voltage/temperature thresholds.
+ * %ADC_TM_HIGH_THR_ENABLE: Enable high voltage/temperature threshold.
+ * %ADC_TM_LOW_THR_ENABLE: Enable low voltage/temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_ENABLE: Enable high and low voltage/temperature
+ * threshold.
+ * %ADC_TM_HIGH_THR_DISABLE: Disable high voltage/temperature threshold.
+ * %ADC_TM_LOW_THR_DISABLE: Disable low voltage/temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_DISABLE: Disable high and low voltage/temperature
+ * threshold.
+ */
+enum qpnp_state_request {
+ ADC_TM_HIGH_THR_ENABLE = 0,
+ ADC_TM_LOW_THR_ENABLE,
+ ADC_TM_HIGH_LOW_THR_ENABLE,
+ ADC_TM_HIGH_THR_DISABLE,
+ ADC_TM_LOW_THR_DISABLE,
+ ADC_TM_HIGH_LOW_THR_DISABLE,
+ ADC_TM_THR_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_usbid_param - Represent USB_ID threshold
+ * monitoring configuration.
+ * @high_thr: High voltage threshold for which notification is requested.
+ * @low_thr: Low voltage threshold for which notification is requested.
+ * @state_request: Enable/disable the corresponding high and low voltage
+ * thresholds.
+ * @timer_interval: Select polling rate from qpnp_adc_meas_timer_1 type.
+ * @threshold_notification: Notification callback once thresholds are crossed.
+ */
+struct qpnp_adc_tm_usbid_param {
+ int32_t high_thr;
+ int32_t low_thr;
+ enum qpnp_state_request state_request;
+ enum qpnp_adc_meas_timer_1 timer_interval;
+ void (*threshold_notification) (enum qpnp_tm_state state);
+};
+
+/**
+ * struct qpnp_adc_tm_btm_param - Represent Battery temperature threshold
+ * monitoring configuration.
+ * @high_temp: High temperature threshold for which notification is requested.
+ * @low_temp: Low temperature threshold for which notification is requested.
+ * @state_request: Enable/disable the corresponding high and low temperature
+ * thresholds.
+ * @timer_interval: Select polling rate from qpnp_adc_meas_timer_2 type.
+ * @threshold_notification: Notification callback once thresholds are crossed.
+ */
+struct qpnp_adc_tm_btm_param {
+ int32_t high_temp;
+ int32_t low_temp;
+ enum qpnp_state_request state_request;
+ enum qpnp_adc_meas_timer_2 timer_interval;
+ void (*threshold_notification) (enum qpnp_tm_state state);
+};
+
+/**
* struct qpnp_vadc_linear_graph - Represent ADC characteristics.
* @dy: Numerator slope to calculate the gain.
* @dx: Denominator slope to calculate the gain.
@@ -541,7 +774,7 @@
};
/**
- * struct qpnp_adc_amux - AMUX properties for individual channel
+ * struct qpnp_vadc_amux - AMUX properties for individual channel
* @name: Channel string name.
* @channel_num: Channel in integer used from qpnp_adc_channels.
* @chan_path_prescaling: Channel scaling performed on the input signal.
@@ -624,7 +857,7 @@
* @adc_prop - ADC properties specific to the ADC peripheral.
* @amux_prop - AMUX properties representing the ADC peripheral.
* @adc_channels - ADC channel properties for the ADC peripheral.
- * @adc_irq - IRQ number that is mapped to the ADC peripheral.
+ * @adc_irq_eoc - End of Conversion IRQ.
* @adc_lock - ADC lock for access to the peripheral.
* @adc_rslt_completion - ADC result notification after interrupt
* is received.
@@ -637,7 +870,7 @@
struct qpnp_adc_properties *adc_prop;
struct qpnp_adc_amux_properties *amux_prop;
struct qpnp_vadc_amux *adc_channels;
- int adc_irq;
+ int adc_irq_eoc;
struct mutex adc_lock;
struct completion adc_rslt_completion;
struct qpnp_iadc_calib calib;
@@ -828,6 +1061,70 @@
* has not occured.
*/
int32_t qpnp_vadc_is_ready(void);
+/**
+ * qpnp_adc_tm_scaler() - Performs reverse calibration.
+ * @config: Thermal monitoring configuration.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution and
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ */
+static inline int32_t qpnp_adc_tm_scaler(struct qpnp_adc_tm_config *tm_config,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop)
+{ return -ENXIO; }
+/**
+ * qpnp_get_vadc_gain_and_offset() - Obtains the VADC gain and offset
+ * for absolute and ratiometric calibration.
+ * @param: The result in which the ADC offset and gain values are stored.
+ * @type: The calibration type whether client needs the absolute or
+ * ratiometric gain and offset values.
+ */
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type);
+/**
+ * qpnp_adc_btm_scaler() - Performs reverse calibration on the low/high
+ * temperature threshold values passed by the client.
+ * The function maps the temperature to voltage and applies
+ * ratiometric calibration on the voltage values.
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_btm_scaler(struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
+ * and convert given temperature to voltage on supported
+ * thermistor channels using 100k pull-up.
+ * @param: The input temperature values.
+ */
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_tm_config *param);
+/**
+ * qpnp_adc_tm_scale_voltage_therm_pu2() - Performs reverse calibration
+ * and converts the given ADC code to temperature for
+ * thermistor channels using 100k pull-up.
+ * @reg: The input ADC code.
+ * @result: The physical measurement temperature on the thermistor.
+ */
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(uint32_t reg, int64_t *result);
+/**
+ * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high
+ * voltage threshold values passed by the client.
+ * The function applies ratiometric calibration on the
+ * voltage values.
+ * @param: The input parameters that contain the low/high voltage
+ * threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_usb_scaler(struct qpnp_adc_tm_usbid_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
#else
static inline int32_t qpnp_vadc_read(uint32_t channel,
struct qpnp_vadc_result *result)
@@ -874,6 +1171,29 @@
{ return -ENXIO; }
static inline int32_t qpnp_vadc_is_ready(void)
{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_adc_chan_properties *chan_prop,
+ struct qpnp_adc_chan_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_get_vadc_gain_and_offset(
+ struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_usb_scaler(
+ struct qpnp_adc_tm_usbid_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_btm_scaler(
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_therm_voltage_pu2(
+ struct qpnp_adc_tm_config *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_voltage_therm_pu2(
+ uint32_t reg, int64_t *result)
+{ return -ENXIO; }
#endif
/* Public API */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 8f86fce..b1f534d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1811,6 +1811,28 @@
V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE = 1
};
#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE (V4L2_CID_MPEG_MSM_VIDC_BASE+24)
+#define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
+enum v4l2_mpeg_vidc_extradata {
+ V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION,
+ V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
+ V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP,
+ V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP,
+ V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP,
+ V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING,
+ V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE,
+ V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW,
+ V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI,
+ V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD,
+ V4L2_MPEG_VIDC_EXTRADATA_AFD_UD,
+ V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO,
+ V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB,
+ V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+};
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 07179e9..3dd0ccd 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -712,8 +712,8 @@
#define AFE_PORT_ID_PRIMARY_MI2S_TX 0x1001
#define AFE_PORT_ID_SECONDARY_MI2S_RX 0x1002
#define AFE_PORT_ID_SECONDARY_MI2S_TX 0x1003
-#define AFE_PORT_IDERTIARY_MI2S_RX 0x1004
-#define AFE_PORT_IDERTIARY_MI2S_TX 0x1005
+#define AFE_PORT_ID_TERTIARY_MI2S_RX 0x1004
+#define AFE_PORT_ID_TERTIARY_MI2S_TX 0x1005
#define AFE_PORT_ID_QUATERNARY_MI2S_RX 0x1006
#define AFE_PORT_ID_QUATERNARY_MI2S_TX 0x1007
#define AUDIO_PORT_ID_I2S_RX 0x1008
@@ -6123,6 +6123,11 @@
/* Band cut equalizer effect.*/
#define ASM_PARAM_EQ_BAND_CUT 6
+/* Voice get & set params */
+#define VOICE_CMD_SET_PARAM 0x0001133D
+#define VOICE_CMD_GET_PARAM 0x0001133E
+#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
/* ERROR CODES */
/* Success. The operation completed with no errors. */
diff --git a/include/sound/msm-dai-q6-v2.h b/include/sound/msm-dai-q6-v2.h
index 6c60318..4ecd435 100644
--- a/include/sound/msm-dai-q6-v2.h
+++ b/include/sound/msm-dai-q6-v2.h
@@ -20,10 +20,11 @@
#define MSM_MI2S_SD3 (1 << 3)
#define MSM_MI2S_CAP_RX 0
#define MSM_MI2S_CAP_TX 1
+
#define MSM_PRIM_MI2S 0
#define MSM_SEC_MI2S 1
#define MSM_TERT_MI2S 2
-#define MSM_QUAD_MI2S 3
+#define MSM_QUAT_MI2S 3
struct msm_dai_auxpcm_pdata {
const char *clk;
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index cb2f3d7..9c43d09 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -16,13 +16,13 @@
#define ADM_PATH_PLAYBACK 0x1
#define ADM_PATH_LIVE_REC 0x2
#define ADM_PATH_NONLIVE_REC 0x3
+#include <sound/q6afe-v2.h>
#include <sound/q6audio-v2.h>
-#define Q6_AFE_MAX_PORTS 32
/* multiple copp per stream. */
struct route_payload {
- unsigned int copp_ids[Q6_AFE_MAX_PORTS];
+ unsigned int copp_ids[AFE_MAX_PORTS];
unsigned short num_copps;
unsigned int session_id;
};
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 1324f8a..444b432 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -71,6 +71,8 @@
IDX_INT_FM_TX = 29,
IDX_RT_PROXY_PORT_001_RX = 30,
IDX_RT_PROXY_PORT_001_TX = 31,
+ IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX = 32,
+ IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX = 33,
AFE_MAX_PORTS
};
diff --git a/lib/Kconfig b/lib/Kconfig
index f1621d5..8437e36 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -387,4 +387,11 @@
The kernel drivers receive the QMI message over a transport
and then decode it into a C structure.
+config QMI_ENCDEC_DEBUG
+ bool
+ help
+ Kernel config option to enable debugging QMI Encode/Decode
+ library. This will log the information regarding the element
+ and message being encoded & decoded.
+
endmenu
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
index d759885..40273d0 100644
--- a/lib/qmi_encdec.c
+++ b/lib/qmi_encdec.c
@@ -25,6 +25,61 @@
#define TLV_LEN_SIZE sizeof(uint16_t)
#define TLV_TYPE_SIZE sizeof(uint8_t)
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+ const u8 *ptr = buf; \
+ int i, linelen, remaining = buf_len; \
+ int rowsize = 16, groupsize = 1; \
+ unsigned char linebuf[256]; \
+ for (i = 0; i < buf_len; i += rowsize) { \
+ linelen = min(remaining, rowsize); \
+ remaining -= linelen; \
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+ linebuf, sizeof(linebuf), false); \
+ pr_debug("%s: %s\n", prefix_str, linebuf); \
+ } \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) { }
+#define QMI_DECODE_LOG_MSG(buf, buf_len) { }
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) { }
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) { }
+
+#endif
+
static int _qmi_kernel_encode(struct elem_info *ei_array,
void *out_buf, void *in_c_struct,
int enc_level);
@@ -232,6 +287,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_encode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+ temp_ei->elem_size, buf_src);
UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
encoded_bytes, tlv_len, encode_tlv, rc);
break;
@@ -253,6 +310,7 @@
if (encode_tlv && enc_level == 1) {
QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
tlv_pointer = buf_dst;
tlv_len = 0;
@@ -260,6 +318,7 @@
encode_tlv = 0;
}
}
+ QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
return encoded_bytes;
}
@@ -419,11 +478,13 @@
void *buf_src = in_buf;
int rc;
+ QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
while (decoded_bytes < in_buf_len) {
if (dec_level == 1) {
tlv_pointer = buf_src;
QMI_ENCDEC_DECODE_TLV(&tlv_type,
&tlv_len, tlv_pointer);
+ QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
temp_ei = find_ei(ei_array, tlv_type);
@@ -470,6 +531,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_decode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+ temp_ei->elem_size, buf_dst);
UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
break;
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 412090f..fd9d825 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -2836,7 +2836,7 @@
if (dai->id <= NUM_CODEC_DAIS) {
if (sitar->dai[dai->id].ch_mask) {
active = 1;
- pr_debug("%s(): Codec DAI: chmask[%d] = 0x%x\n",
+ pr_debug("%s(): Codec DAI: chmask[%d] = 0x%lx\n",
__func__, dai->id,
sitar->dai[dai->id].ch_mask);
}
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index f28fd774..e11b985 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -3035,7 +3035,7 @@
msecs_to_jiffies(300));
}
/* apply the digital gain after the decimator is enabled*/
- if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+ if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg))
snd_soc_write(codec,
tx_digital_gain_reg[w->shift + offset],
snd_soc_read(codec,
@@ -4128,7 +4128,7 @@
if (dai->id <= NUM_CODEC_DAIS) {
if (tabla->dai[dai->id].ch_mask) {
active = 1;
- pr_debug("%s(): Codec DAI: chmask[%d] = 0x%x\n",
+ pr_debug("%s(): Codec DAI: chmask[%d] = 0x%lx\n",
__func__, dai->id, tabla->dai[dai->id].ch_mask);
}
}
@@ -7863,7 +7863,7 @@
port_id = i*8 + j;
for (k = 0; k < ARRAY_SIZE(tabla_dai); k++) {
ch_mask_temp = 1 << port_id;
- pr_debug("%s: tabla_p->dai[%d].ch_mask = 0x%x\n",
+ pr_debug("%s: tabla_p->dai[%d].ch_mask = 0x%lx\n",
__func__, k,
tabla_p->dai[k].ch_mask);
if (ch_mask_temp &
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index ed414d4..5ffb60a 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -18,6 +18,8 @@
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
@@ -45,6 +47,13 @@
#define TAIKO_TX_PORT_NUMBER 16
#define TAIKO_I2S_MASTER_MODE_MASK 0x08
+#define TAIKO_MCLK_CLK_12P288MHZ 12288000
+#define TAIKO_MCLK_CLK_9P6HZ 9600000
+
+#define TAIKO_SLIM_CLOSE_TIMEOUT 1000
+#define TAIKO_SLIM_IRQ_OVERFLOW (1 << 0)
+#define TAIKO_SLIM_IRQ_UNDERFLOW (1 << 1)
+#define TAIKO_SLIM_IRQ_PORT_CLOSED (1 << 2)
enum {
AIF1_PB = 0,
@@ -97,7 +106,8 @@
};
enum {
- COMPANDER_1 = 0,
+ COMPANDER_0,
+ COMPANDER_1,
COMPANDER_2,
COMPANDER_MAX,
};
@@ -203,6 +213,7 @@
};
static const u32 comp_shift[] = {
+ 4, /* Compander 0's clock source is on interpolator 7 */
0,
2,
};
@@ -214,31 +225,46 @@
COMPANDER_2,
COMPANDER_2,
COMPANDER_2,
+ COMPANDER_0,
COMPANDER_MAX,
};
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
- .peak_det_timeout = 0x2,
- .rms_meter_div_fact = 0x8 << 4,
- .rms_meter_resamp_fact = 0x21,
+ /* 8 Khz */
+ .peak_det_timeout = 0x02,
+ .rms_meter_div_fact = 0x09,
+ .rms_meter_resamp_fact = 0x06,
},
{
- .peak_det_timeout = 0x3,
- .rms_meter_div_fact = 0x9 << 4,
+ /* 16 Khz */
+ .peak_det_timeout = 0x03,
+ .rms_meter_div_fact = 0x0A,
+ .rms_meter_resamp_fact = 0x0C,
+ },
+ {
+ /* 32 Khz */
+ .peak_det_timeout = 0x05,
+ .rms_meter_div_fact = 0x0B,
+ .rms_meter_resamp_fact = 0x1E,
+ },
+ {
+ /* 48 Khz */
+ .peak_det_timeout = 0x05,
+ .rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
-
{
- .peak_det_timeout = 0x5,
- .rms_meter_div_fact = 0xB << 4,
- .rms_meter_resamp_fact = 0x28,
+ /* 96 Khz */
+ .peak_det_timeout = 0x06,
+ .rms_meter_div_fact = 0x0C,
+ .rms_meter_resamp_fact = 0x50,
},
-
{
- .peak_det_timeout = 0x5,
- .rms_meter_div_fact = 0xB << 4,
- .rms_meter_resamp_fact = 0x28,
+ /* 192 Khz */
+ .peak_det_timeout = 0x07,
+ .rms_meter_div_fact = 0xD,
+ .rms_meter_resamp_fact = 0xA0,
},
};
@@ -549,194 +575,167 @@
return 0;
}
-static int taiko_compander_gain_offset(
- struct snd_soc_codec *codec, u32 enable,
- unsigned int reg, int mask, int event)
-{
- int pa_mode = snd_soc_read(codec, reg) & mask;
- int gain_offset = 0;
- /* if PMU && enable is 1-> offset is 3
- * if PMU && enable is 0-> offset is 0
- * if PMD && pa_mode is PA -> offset is 0: PMU compander is off
- * if PMD && pa_mode is comp -> offset is -3: PMU compander is on.
- */
-
- if (SND_SOC_DAPM_EVENT_ON(event) && (enable != 0))
- gain_offset = TAIKO_COMP_DIGITAL_GAIN_OFFSET;
- if (SND_SOC_DAPM_EVENT_OFF(event) && (pa_mode == 0))
- gain_offset = -TAIKO_COMP_DIGITAL_GAIN_OFFSET;
- return gain_offset;
-}
-
-
-static int taiko_config_gain_compander(
- struct snd_soc_codec *codec,
- u32 compander, u32 enable, int event)
-{
- int value = 0;
- int mask = 1 << 5;
- int gain = 0;
- int gain_offset;
- if (compander >= COMPANDER_MAX) {
- pr_err("%s: Error, invalid compander channel\n", __func__);
- return -EINVAL;
- }
-
- if ((enable == 0) || SND_SOC_DAPM_EVENT_OFF(event))
- value = 1 << 4;
-
- if (compander == COMPANDER_1) {
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_HPH_L_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_HPH_L_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_HPH_R_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_HPH_R_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- } else if (compander == COMPANDER_2) {
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_LINE_1_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_LINE_1_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_LINE_3_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_LINE_3_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_LINE_2_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_LINE_2_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- gain_offset = taiko_compander_gain_offset(codec, enable,
- TAIKO_A_RX_LINE_4_GAIN, mask, event);
- snd_soc_update_bits(codec, TAIKO_A_RX_LINE_4_GAIN, mask, value);
- gain = snd_soc_read(codec, TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL);
- snd_soc_update_bits(codec, TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL,
- 0xFF, gain - gain_offset);
- }
- return 0;
-}
static int taiko_get_compander(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
int comp = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->max;
+ kcontrol->private_value)->shift;
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = taiko->comp_enabled[comp];
-
return 0;
}
static int taiko_set_compander(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
int comp = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->max;
+ kcontrol->private_value)->shift;
int value = ucontrol->value.integer.value[0];
- if (value == taiko->comp_enabled[comp]) {
- pr_debug("%s: compander #%d enable %d no change\n",
- __func__, comp, value);
- return 0;
- }
+ pr_debug("%s: Compander %d enable current %d, new %d\n",
+ __func__, comp, taiko->comp_enabled[comp], value);
taiko->comp_enabled[comp] = value;
return 0;
}
+static int taiko_config_gain_compander(struct snd_soc_codec *codec,
+ int comp, bool enable)
+{
+ int ret = 0;
+
+ switch (comp) {
+ case COMPANDER_0:
+ snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_GAIN,
+ 1 << 2, !enable << 2);
+ break;
+ case COMPANDER_1:
+ snd_soc_update_bits(codec, TAIKO_A_RX_HPH_L_GAIN,
+ 1 << 5, !enable << 5);
+ snd_soc_update_bits(codec, TAIKO_A_RX_HPH_R_GAIN,
+ 1 << 5, !enable << 5);
+ break;
+ case COMPANDER_2:
+ snd_soc_update_bits(codec, TAIKO_A_RX_LINE_1_GAIN,
+ 1 << 5, !enable << 5);
+ snd_soc_update_bits(codec, TAIKO_A_RX_LINE_3_GAIN,
+ 1 << 5, !enable << 5);
+ snd_soc_update_bits(codec, TAIKO_A_RX_LINE_2_GAIN,
+ 1 << 5, !enable << 5);
+ snd_soc_update_bits(codec, TAIKO_A_RX_LINE_4_GAIN,
+ 1 << 5, !enable << 5);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void taiko_discharge_comp(struct snd_soc_codec *codec, int comp)
+{
+ /* Update RSM to 1, DIVF to 5 */
+ snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+ snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
+ 1 << 5);
+ /* Wait for 1ms */
+ usleep_range(1000, 1000);
+}
static int taiko_config_compander(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
+ struct snd_kcontrol *kcontrol, int event)
{
+ int mask, emask;
+ bool timedout;
+ unsigned long timeout;
struct snd_soc_codec *codec = w->codec;
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
- u32 rate = taiko->comp_fs[w->shift];
+ const int comp = w->shift;
+ const u32 rate = taiko->comp_fs[comp];
+ const struct comp_sample_dependent_params *comp_params =
+ &comp_samp_params[rate];
- pr_debug("%s: %s event %d enabled = %d", __func__, w->name,
- event, taiko->comp_enabled[w->shift]);
+ pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
+ w->name, event, comp, taiko->comp_enabled[comp]);
+
+ if (!taiko->comp_enabled[comp])
+ return 0;
+
+ /* Compander 0 has single channel */
+ mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
+ emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- if (taiko->comp_enabled[w->shift] != 0) {
- /* Enable both L/R compander clocks */
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CLK_RX_B2_CTL,
- 0x03 << comp_shift[w->shift],
- 0x03 << comp_shift[w->shift]);
- /* Clar the HALT for the compander*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 1 << 2, 0);
- /* Toggle compander reset bits*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
- 0x03 << comp_shift[w->shift],
- 0x03 << comp_shift[w->shift]);
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
- 0x03 << comp_shift[w->shift], 0);
- taiko_config_gain_compander(codec, w->shift, 1, event);
- /* Update the RMS meter resampling*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_COMP1_B3_CTL +
- w->shift * 8, 0xFF, 0x01);
- /* Wait for 1ms*/
- usleep_range(1000, 1000);
- }
+ /* Set gain source to compander */
+ taiko_config_gain_compander(codec, comp, true);
+ /* Enable RX interpolation path clocks */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
+ mask << comp_shift[comp],
+ mask << comp_shift[comp]);
+
+ taiko_discharge_comp(codec, comp);
+
+ /* Clear compander halt */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+ (comp * 8),
+ 1 << 2, 0);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp],
+ mask << comp_shift[comp]);
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ mask << comp_shift[comp], 0);
break;
case SND_SOC_DAPM_POST_PMU:
- /* Set sample rate dependent paramater*/
- if (taiko->comp_enabled[w->shift] != 0) {
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_FS_CFG +
- w->shift * 8, 0x03, rate);
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B2_CTL +
- w->shift * 8, 0x0F,
- comp_samp_params[rate].peak_det_timeout);
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B2_CTL +
- w->shift * 8, 0xF0,
- comp_samp_params[rate].rms_meter_div_fact);
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B3_CTL +
- w->shift * 8, 0xFF,
- comp_samp_params[rate].rms_meter_resamp_fact);
- /* Compander enable -> 0x370/0x378*/
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 0x03, 0x03);
- }
+		/* Set sample rate dependent parameter */
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
+ 0x07, rate);
+ snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8),
+ comp_params->rms_meter_resamp_fact);
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0x0F, comp_params->peak_det_timeout);
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0xF0, comp_params->rms_meter_div_fact << 4);
+ /* Compander enable */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+ (comp * 8), emask, emask);
break;
case SND_SOC_DAPM_PRE_PMD:
- /* Halt the compander*/
- if (taiko->comp_enabled[w->shift] != 0) {
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 1 << 2, 1 << 2);
- }
+ /* Halt compander */
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
+ 1 << 2, 1 << 2);
+ /* Wait up to a second for shutdown complete */
+ timeout = jiffies + HZ;
+ do {
+ if ((snd_soc_read(codec,
+ TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS +
+ (comp * 8)) & mask) == mask)
+ break;
+ } while (!(timedout = time_after(jiffies, timeout)));
+ pr_debug("%s: Compander %d shutdown %s in %dms\n", __func__,
+ comp, timedout ? "timedout" : "completed",
+ jiffies_to_msecs(timeout - HZ - jiffies));
break;
case SND_SOC_DAPM_POST_PMD:
- /* Restore the gain */
- if (taiko->comp_enabled[w->shift] != 0) {
- taiko_config_gain_compander(codec, w->shift,
- taiko->comp_enabled[w->shift], event);
- /* Disable the compander*/
- snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
- w->shift * 8, 0x03, 0x00);
- /* Turn off the clock for compander in pair*/
- snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
- 0x03 << comp_shift[w->shift], 0);
- }
+ /* Disable compander */
+ snd_soc_update_bits(codec,
+ TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
+ emask, 0x00);
+ /* Turn off the clock for compander in pair */
+ snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
+ mask << comp_shift[comp], 0);
+ /* Set gain source to register */
+ taiko_config_gain_compander(codec, comp, false);
break;
}
return 0;
@@ -960,10 +959,12 @@
SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
taiko_get_iir_band_audio_mixer, taiko_put_iir_band_audio_mixer),
- SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, 1, COMPANDER_1, 0,
- taiko_get_compander, taiko_set_compander),
- SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, 0, COMPANDER_2, 0,
- taiko_get_compander, taiko_set_compander),
+ SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0,
+ taiko_get_compander, taiko_set_compander),
+ SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+ taiko_get_compander, taiko_set_compander),
+ SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+ taiko_get_compander, taiko_set_compander),
};
@@ -1538,45 +1539,50 @@
return -EINVAL;
}
}
- switch (dai_id) {
- case AIF1_CAP:
- case AIF2_CAP:
- case AIF3_CAP:
- /* only add to the list if value not set
- */
- if (enable && !(widget->value & 1 << port_id)) {
- if (wcd9xxx_tx_vport_validation(
+ if (taiko_p->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ switch (dai_id) {
+ case AIF1_CAP:
+ case AIF2_CAP:
+ case AIF3_CAP:
+ /* only add to the list if value not set
+ */
+ if (enable && !(widget->value & 1 << port_id)) {
+ if (wcd9xxx_tx_vport_validation(
vport_check_table[dai_id],
port_id,
taiko_p->dai)) {
- pr_info("%s: TX%u is used by other virtual port\n",
- __func__, port_id + 1);
- mutex_unlock(&codec->mutex);
- return -EINVAL;
- }
- widget->value |= 1 << port_id;
- list_add_tail(&core->tx_chs[port_id].list,
+ pr_debug("%s: TX%u is used by other\n"
+ "virtual port\n",
+ __func__, port_id + 1);
+ mutex_unlock(&codec->mutex);
+ return -EINVAL;
+ }
+ widget->value |= 1 << port_id;
+ list_add_tail(&core->tx_chs[port_id].list,
&taiko_p->dai[dai_id].wcd9xxx_ch_list
- );
- } else if (!enable && (widget->value & 1 << port_id)) {
- widget->value &= ~(1 << port_id);
- list_del_init(&core->tx_chs[port_id].list);
- } else {
- if (enable)
- pr_info("%s: TX%u port is used by this virtual port\n",
- __func__, port_id + 1);
- else
- pr_info("%s: TX%u port is not used by this virtual port\n",
- __func__, port_id + 1);
- /* avoid update power function */
+ );
+ } else if (!enable && (widget->value & 1 << port_id)) {
+ widget->value &= ~(1 << port_id);
+ list_del_init(&core->tx_chs[port_id].list);
+ } else {
+ if (enable)
+ pr_debug("%s: TX%u port is used by\n"
+ "this virtual port\n",
+ __func__, port_id + 1);
+ else
+ pr_debug("%s: TX%u port is not used by\n"
+ "this virtual port\n",
+ __func__, port_id + 1);
+ /* avoid update power function */
+ mutex_unlock(&codec->mutex);
+ return 0;
+ }
+ break;
+ default:
+ pr_err("Unknown AIF %d\n", dai_id);
mutex_unlock(&codec->mutex);
- return 0;
+ return -EINVAL;
}
- break;
- default:
- pr_err("Unknown AIF %d\n", dai_id);
- mutex_unlock(&codec->mutex);
- return -EINVAL;
}
pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
widget->name, widget->sname, widget->value, widget->shift);
@@ -2241,7 +2247,7 @@
msecs_to_jiffies(300));
}
/* apply the digital gain after the decimator is enabled*/
- if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+ if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg))
snd_soc_write(codec,
tx_digital_gain_reg[w->shift + offset],
snd_soc_read(codec,
@@ -2438,10 +2444,10 @@
{"SLIM RX3", NULL, "RX_I2S_CLK"},
{"SLIM RX4", NULL, "RX_I2S_CLK"},
- {"SLIM TX7", NULL, "TX_I2S_CLK"},
- {"SLIM TX8", NULL, "TX_I2S_CLK"},
- {"SLIM TX9", NULL, "TX_I2S_CLK"},
- {"SLIM TX10", NULL, "TX_I2S_CLK"},
+ {"SLIM TX7 MUX", NULL, "TX_I2S_CLK"},
+ {"SLIM TX8 MUX", NULL, "TX_I2S_CLK"},
+ {"SLIM TX9 MUX", NULL, "TX_I2S_CLK"},
+ {"SLIM TX10 MUX", NULL, "TX_I2S_CLK"},
};
static const struct snd_soc_dapm_route audio_map[] = {
@@ -2665,6 +2671,7 @@
{"LINEOUT4 DAC", NULL, "RX_BIAS"},
{"SPK DAC", NULL, "RX_BIAS"},
+ {"RX7 MIX1", NULL, "COMP0_CLK"},
{"RX1 MIX1", NULL, "COMP1_CLK"},
{"RX2 MIX1", NULL, "COMP1_CLK"},
{"RX3 MIX1", NULL, "COMP2_CLK"},
@@ -3101,7 +3108,11 @@
static int taiko_set_dai_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
- pr_debug("%s\n", __func__);
+ struct snd_soc_codec *codec = dai->codec;
+ if (freq == TAIKO_MCLK_CLK_12P288MHZ)
+ snd_soc_write(codec, TAIKO_A_CHIP_CTL, 0x04);
+ else if (freq == TAIKO_MCLK_CLK_9P6HZ)
+ snd_soc_write(codec, TAIKO_A_CHIP_CTL, 0x0A);
return 0;
}
@@ -3619,6 +3630,37 @@
},
};
+static int taiko_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
+ bool up)
+{
+ int ret = 0;
+ struct wcd9xxx_ch *ch;
+
+ if (up) {
+ list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+ ret = wcd9xxx_get_slave_port(ch->ch_num);
+ if (ret < 0) {
+ pr_err("%s: Invalid slave port ID: %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ } else {
+ set_bit(ret, &dai->ch_mask);
+ }
+ }
+ } else {
+ ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
+ msecs_to_jiffies(
+ TAIKO_SLIM_CLOSE_TIMEOUT));
+ if (!ret) {
+ pr_err("%s: Slim close tx/rx wait timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
static int taiko_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -3626,7 +3668,7 @@
struct wcd9xxx *core;
struct snd_soc_codec *codec = w->codec;
struct taiko_priv *taiko_p = snd_soc_codec_get_drvdata(codec);
- u32 ret = 0;
+ int ret = 0;
struct wcd9xxx_codec_dai_data *dai;
core = dev_get_drvdata(codec->dev->parent);
@@ -3645,6 +3687,7 @@
switch (event) {
case SND_SOC_DAPM_POST_PMU:
+ (void) taiko_codec_enable_slim_chmask(dai, true);
ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
dai->rate, dai->bit_width,
&dai->grph);
@@ -3652,7 +3695,14 @@
case SND_SOC_DAPM_POST_PMD:
ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
dai->grph);
- usleep_range(15000, 15000);
+ ret = taiko_codec_enable_slim_chmask(dai, false);
+ if (ret < 0) {
+ ret = wcd9xxx_disconnect_port(core,
+ &dai->wcd9xxx_ch_list,
+ dai->grph);
+ pr_debug("%s: Disconnect RX port, ret = %d\n",
+ __func__, ret);
+ }
break;
}
return ret;
@@ -3683,6 +3733,7 @@
dai = &taiko_p->dai[w->shift];
switch (event) {
case SND_SOC_DAPM_POST_PMU:
+ (void) taiko_codec_enable_slim_chmask(dai, true);
ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
dai->rate, dai->bit_width,
&dai->grph);
@@ -3690,6 +3741,14 @@
case SND_SOC_DAPM_POST_PMD:
ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
dai->grph);
+ ret = taiko_codec_enable_slim_chmask(dai, false);
+ if (ret < 0) {
+ ret = wcd9xxx_disconnect_port(core,
+ &dai->wcd9xxx_ch_list,
+ dai->grph);
+ pr_debug("%s: Disconnect RX port, ret = %d\n",
+ __func__, ret);
+ }
break;
}
return ret;
@@ -3933,10 +3992,13 @@
SND_SOC_DAPM_SUPPLY("LDO_H", TAIKO_A_LDO_H_MODE_1, 7, 0,
taiko_codec_enable_ldo_h, SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 1, 0,
+ SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
+ taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
@@ -4167,34 +4229,69 @@
};
-static unsigned long slimbus_value;
-
static irqreturn_t taiko_slimbus_irq(int irq, void *data)
{
struct taiko_priv *priv = data;
struct snd_soc_codec *codec = priv->codec;
- int i, j;
+ unsigned long status = 0;
+ int i, j, port_id, k;
+ u32 bit;
u8 val;
+ bool tx, cleared;
- for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) {
- slimbus_value = wcd9xxx_interface_reg_read(codec->control_data,
- TAIKO_SLIM_PGD_PORT_INT_STATUS0 + i);
- for_each_set_bit(j, &slimbus_value, BITS_PER_BYTE) {
- val = wcd9xxx_interface_reg_read(codec->control_data,
- TAIKO_SLIM_PGD_PORT_INT_SOURCE0 + i*8 + j);
- if (val & 0x1)
- pr_err_ratelimited(
- "overflow error on port %x, value %x\n",
- i*8 + j, val);
- if (val & 0x2)
- pr_err_ratelimited(
- "underflow error on port %x, value %x\n",
- i*8 + j, val);
+ for (i = TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
+ i <= TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
+ val = wcd9xxx_interface_reg_read(codec->control_data, i);
+ status |= ((u32)val << (8 * j));
+ }
+
+ for_each_set_bit(j, &status, 32) {
+ tx = (j >= 16 ? true : false);
+ port_id = (tx ? j - 16 : j);
+ val = wcd9xxx_interface_reg_read(codec->control_data,
+ TAIKO_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
+ if (val & TAIKO_SLIM_IRQ_OVERFLOW)
+ pr_err_ratelimited(
+ "%s: overflow error on %s port %d, value %x\n",
+ __func__, (tx ? "TX" : "RX"), port_id, val);
+ if (val & TAIKO_SLIM_IRQ_UNDERFLOW)
+ pr_err_ratelimited(
+ "%s: underflow error on %s port %d, value %x\n",
+ __func__, (tx ? "TX" : "RX"), port_id, val);
+ if (val & TAIKO_SLIM_IRQ_PORT_CLOSED) {
+ /*
+ * INT SOURCE register starts from RX to TX
+ * but port number in the ch_mask is in opposite way
+ */
+ bit = (tx ? j - 16 : j + 16);
+ pr_debug("%s: %s port %d closed value %x, bit %u\n",
+ __func__, (tx ? "TX" : "RX"), port_id, val,
+ bit);
+ for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
+ pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n",
+ __func__, k, priv->dai[k].ch_mask);
+ if (test_and_clear_bit(bit,
+ &priv->dai[k].ch_mask)) {
+ cleared = true;
+ if (!priv->dai[k].ch_mask)
+ wake_up(&priv->dai[k].dai_wait);
+ /*
+ * There are cases when multiple DAIs
+ * might be using the same slimbus
+ * channel. Hence don't break here.
+ */
+ }
+ }
+ WARN(!cleared,
+ "Couldn't find slimbus %s port %d for closing\n",
+ (tx ? "TX" : "RX"), port_id);
}
wcd9xxx_interface_reg_write(codec->control_data,
- TAIKO_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
-
+ TAIKO_SLIM_PGD_PORT_INT_CLR_RX_0 +
+ (j / 8),
+ 1 << (j % 8));
}
+
return IRQ_HANDLED;
}
@@ -4536,6 +4633,7 @@
{TAIKO_A_RX_LINE_2_GAIN, 0x20, 0x20},
{TAIKO_A_RX_LINE_3_GAIN, 0x20, 0x20},
{TAIKO_A_RX_LINE_4_GAIN, 0x20, 0x20},
+ {TAIKO_A_SPKR_DRV_GAIN, 0x04, 0x04},
/* CLASS H config */
{TAIKO_A_CDC_CONN_CLSH_CTL, 0x3C, 0x14},
@@ -4586,6 +4684,13 @@
{TAIKO_A_CDC_CLK_DMIC_B1_CTL, 0xEE, 0x22},
{TAIKO_A_CDC_CLK_DMIC_B2_CTL, 0x0E, 0x02},
+ /* Compander zone selection */
+ {TAIKO_A_CDC_COMP0_B4_CTL, 0x3F, 0x37},
+ {TAIKO_A_CDC_COMP1_B4_CTL, 0x3F, 0x37},
+ {TAIKO_A_CDC_COMP2_B4_CTL, 0x3F, 0x37},
+ {TAIKO_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
+ {TAIKO_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
+ {TAIKO_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
};
static void taiko_codec_init_reg(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wcd9320.h b/sound/soc/codecs/wcd9320.h
index 7bc5a57..1fff80c 100644
--- a/sound/soc/codecs/wcd9320.h
+++ b/sound/soc/codecs/wcd9320.h
@@ -23,6 +23,7 @@
#define TAIKO_CACHE_SIZE TAIKO_NUM_REGISTERS
#define TAIKO_REG_VAL(reg, val) {reg, 0, val}
+#define TAIKO_MCLK_ID 0
extern const u8 taiko_reg_readable[TAIKO_CACHE_SIZE];
extern const u8 taiko_reset_reg_defaults[TAIKO_CACHE_SIZE];
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 894e114..d5cada7 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -200,4 +200,15 @@
default n
help
To add support for SoC audio on APQ8060 board
+
+config SND_SOC_MDM9625
+ tristate "SoC Machine driver for MDM9625 boards"
+ depends on ARCH_MSM9625
+ select SND_SOC_QDSP6V2
+ select SND_SOC_MSM_STUB
+ select SND_SOC_WCD9320
+ select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_DYNAMIC_MINORS
+ help
+ To add support for SoC audio on MDM9625 boards.
endmenu
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index a261184..a4c365a 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -84,3 +84,6 @@
snd-soc-qdsp6v2-objs := msm-dai-fe.o msm-dai-stub.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+#for MDM9625 sound card driver
+snd-soc-mdm9625-objs := mdm9625.o
+obj-$(CONFIG_SND_SOC_MDM9625) += snd-soc-mdm9625.o
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 59e220d..76cd625 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -1702,6 +1702,19 @@
return 0;
}
+
+static int mdm9615_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ pr_debug("%s()\n", __func__);
+ rate->min = rate->max = 48000;
+
+ return 0;
+}
+
static int mdm9615_aux_pcm_get_gpios(void)
{
int ret = 0;
@@ -2134,6 +2147,43 @@
.be_hw_params_fixup = mdm9615_auxpcm_be_params_fixup,
.ops = &mdm9615_sec_auxpcm_be_ops,
},
+ /* Incall Music BACK END DAI Link */
+ {
+ .name = LPASS_BE_VOICE_PLAYBACK_TX,
+ .stream_name = "Voice Farend Playback",
+ .cpu_dai_name = "msm-dai-q6.32773",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Uplink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_TX,
+ .stream_name = "Voice Uplink Capture",
+ .cpu_dai_name = "msm-dai-q6.32772",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Downlink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_RX,
+ .stream_name = "Voice Downlink Capture",
+ .cpu_dai_name = "msm-dai-q6.32771",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ .ignore_pmdown_time = 1, /* this dailink has playback support */
+ },
};
static struct snd_soc_dai_link mdm9615_dai_i2s_tabla[] = {
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
new file mode 100644
index 0000000..b1822f6
--- /dev/null
+++ b/sound/soc/msm/mdm9625.c
@@ -0,0 +1,798 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/qpnp/clkdiv.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/jack.h>
+#include <asm/mach-types.h>
+#include <mach/socinfo.h>
+#include <qdsp6v2/msm-pcm-routing-v2.h>
+#include "../codecs/wcd9320.h"
+
+/* MI2S GPIO SECTION */
+
+#define GPIO_MI2S_WS 12
+#define GPIO_MI2S_SCLK 15
+#define GPIO_MI2S_DOUT 14
+#define GPIO_MI2S_DIN 13
+#define GPIO_MI2S_MCLK 71
+
+/* Spk control */
+#define MDM9625_SPK_ON 1
+
+/* MDM9625 runs Taiko at 12.288 MHz.
+ * At present MDM supports 12.288 MHz
+ * only. Taiko supports 9.6 MHz also.
+ */
+#define MDM_MCLK_CLK_12P288MHZ 12288000
+#define MDM_MCLK_CLK_9P6HZ 9600000
+#define MDM_IBIT_CLK_DIV_1P56MHZ 7
+
+/* Machine driver Name*/
+#define MDM9625_MACHINE_DRV_NAME "mdm9625-asoc-taiko"
+
+struct mdm9625_machine_data {
+ u32 mclk_freq;
+};
+
+/* MI2S clock */
+struct mdm_mi2s_clk {
+ struct clk *cdc_cr_clk;
+ struct clk *cdc_osr_clk;
+ struct clk *cdc_bit_clk;
+ bool clk_enable;
+
+};
+static struct mdm_mi2s_clk prim_clk;
+
+/* I2S GPIO */
+struct request_gpio {
+ unsigned gpio_no;
+ char *gpio_name;
+};
+static bool cdc_mclk_init;
+static struct mutex cdc_mclk_mutex;
+static int mdm9625_mi2s_rx_ch = 1;
+static int mdm9625_mi2s_tx_ch = 1;
+static int msm_spk_control;
+static atomic_t mi2s_ref_count;
+
+/* MI2S GPIO CONFIG */
+static struct request_gpio mi2s_gpio[] = {
+ {
+ .gpio_no = GPIO_MI2S_WS,
+ .gpio_name = "MI2S_WS",
+ },
+ {
+ .gpio_no = GPIO_MI2S_SCLK,
+ .gpio_name = "MI2S_SCLK",
+ },
+ {
+ .gpio_no = GPIO_MI2S_DOUT,
+ .gpio_name = "MI2S_DOUT",
+ },
+ {
+ .gpio_no = GPIO_MI2S_DIN,
+ .gpio_name = "MI2S_DIN",
+ },
+ {
+ .gpio_no = GPIO_MI2S_MCLK,
+ .gpio_name = "MI2S_MCLK",
+ },
+};
+
+static int mdm9625_enable_codec_ext_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm);
+
+void *def_taiko_mbhc_cal(void);
+
+static struct wcd9xxx_mbhc_config mbhc_cfg = {
+ .read_fw_bin = false,
+ .calibration = NULL,
+ .micbias = MBHC_MICBIAS2,
+ .mclk_cb_fn = mdm9625_enable_codec_ext_clk,
+ .mclk_rate = MDM_MCLK_CLK_12P288MHZ,
+ .gpio = 0,
+ .gpio_irq = 0,
+ .gpio_level_insert = 1,
+ .detect_extn_cable = true,
+ .insert_detect = true,
+ .swap_gnd_mic = NULL,
+};
+
+#define WCD9XXX_MBHC_DEF_BUTTONS 8
+#define WCD9XXX_MBHC_DEF_RLOADS 5
+
+
+static bool gpio_enable;
+
+static int mdm9625_set_mi2s_gpio(void)
+{
+ int rtn = 0;
+ int i;
+ int j;
+
+ if (gpio_enable == false) {
+ for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++) {
+ rtn = gpio_request(mi2s_gpio[i].gpio_no,
+ mi2s_gpio[i].gpio_name);
+ pr_debug("%s: gpio = %d, gpio name = %s\n"
+ "rtn = %d\n", __func__,
+ mi2s_gpio[i].gpio_no,
+ mi2s_gpio[i].gpio_name,
+ rtn);
+ if (rtn) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, mi2s_gpio[i].gpio_no);
+ /* Release all the GPIO on failure */
+ for (j = i; j >= 0; j--)
+ gpio_free(mi2s_gpio[j].gpio_no);
+ goto err;
+ }
+ }
+ gpio_enable = true;
+ }
+err:
+ return rtn;
+}
+
+static int mdm9625_mi2s_free_gpios(void)
+{
+ int i;
+ pr_debug("%s:", __func__);
+ for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++)
+ gpio_free(mi2s_gpio[i].gpio_no);
+ gpio_enable = false;
+ return 0;
+}
+static int mdm9625_mi2s_clk_ctl(struct snd_soc_pcm_runtime *rtd, bool enable)
+{
+ struct mdm_mi2s_clk *clk = &prim_clk;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_card *card = rtd->card;
+ struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s:platform data is null\n", __func__);
+ return -ENODEV;
+ }
+
+ if (enable) {
+ if (clk->clk_enable == true) {
+ pr_info("%s:Device clock already enabled\n", __func__);
+ return 0;
+ }
+ /* Set up core clock. */
+ clk->cdc_cr_clk = clk_get(cpu_dai->dev, "core_clk");
+ if (IS_ERR(clk->cdc_cr_clk)) {
+ pr_err("%s: Failed to Core clk %ld\n"
+ "CPU dai name %s\n", __func__,
+ PTR_ERR(clk->cdc_cr_clk),
+ cpu_dai->dev->driver->name);
+ return -ENODEV ;
+ }
+ /* osr clock */
+ clk->cdc_osr_clk = clk_get(cpu_dai->dev, "osr_clk");
+ if (IS_ERR(clk->cdc_osr_clk)) {
+ pr_err("%s: Failed to request OSR %ld\n"
+ "CPU dai name %s\n", __func__,
+ PTR_ERR(clk->cdc_osr_clk),
+ cpu_dai->dev->driver->name);
+ clk_put(clk->cdc_cr_clk);
+ return -ENODEV ;
+ }
+ /* ibit clock */
+ clk->cdc_bit_clk = clk_get(cpu_dai->dev, "ibit_clk");
+ if (IS_ERR(clk->cdc_bit_clk)) {
+ pr_err("%s: Failed to request Bit %ld\n"
+ "CPU dai name %s\n", __func__,
+ PTR_ERR(clk->cdc_bit_clk),
+ cpu_dai->dev->driver->name);
+ clk_put(clk->cdc_cr_clk);
+ clk_put(clk->cdc_osr_clk);
+ return -ENODEV ;
+ }
+ /* Set rate core and ibit clock */
+ clk_set_rate(clk->cdc_cr_clk, pdata->mclk_freq);
+ clk_set_rate(clk->cdc_bit_clk, MDM_IBIT_CLK_DIV_1P56MHZ);
+
+ /* Enable clocks. core clock need not be enabled.
+ * Enabling branch clocks indirectly enables
+ * core clock.
+ */
+ ret = clk_prepare_enable(clk->cdc_osr_clk);
+ if (ret != 0) {
+ pr_err("Fail to enable cdc_osr_clk\n");
+ goto exit_osrclk_err;
+ }
+ ret = clk_prepare_enable(clk->cdc_bit_clk);
+ if (ret != 0) {
+ pr_err("Fail to enable cdc_bit_clk\n");
+ goto exit_bclk_err;
+ }
+ clk->clk_enable = true;
+ return ret;
+ } else {
+ clk->clk_enable = false;
+ ret = 0;
+ goto exit_bclk_err;
+ }
+exit_bclk_err:
+ clk_disable_unprepare(clk->cdc_bit_clk);
+ clk_put(clk->cdc_bit_clk);
+exit_osrclk_err:
+ clk_disable_unprepare(clk->cdc_osr_clk);
+ clk_put(clk->cdc_osr_clk);
+ clk_put(clk->cdc_cr_clk);
+ clk->cdc_cr_clk = NULL;
+ clk->cdc_bit_clk = NULL;
+ clk->cdc_osr_clk = NULL;
+ clk->clk_enable = false;
+ return ret;
+}
+
+static void mdm9625_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret;
+ if (atomic_dec_return(&mi2s_ref_count) == 0) {
+ mdm9625_mi2s_free_gpios();
+ ret = mdm9625_mi2s_clk_ctl(rtd, false);
+ if (ret < 0)
+ pr_err("%s:clock disable failed\n", __func__);
+ }
+}
+
+static int mdm9625_mi2s_startup(struct snd_pcm_substream *substream) /* MI2S stream open: gpio+clk on first use, set DAI fmt */
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int ret = 0;
+
+	if (atomic_inc_return(&mi2s_ref_count) == 1) { /* first user configures gpios and clocks */
+		mdm9625_set_mi2s_gpio();
+		ret = mdm9625_mi2s_clk_ctl(rtd, true);
+		if (ret < 0) {
+			pr_err("%s: clock enable failed\n", __func__); /* was misreported as a set-format failure */
+			return ret; /* NOTE(review): ref count stays incremented on failure — confirm shutdown still runs */
+		}
+	}
+	/* This sets the CONFIG PARAMETER WS_SRC.
+	 * 1 means internal clock master mode.
+	 * 0 means external clock slave mode.
+	 */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		pr_err("set fmt cpu dai failed\n");
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		pr_err("set fmt for codec dai failed\n");
+
+	return ret;
+}
+
+static int set_codec_mclk(struct snd_soc_pcm_runtime *rtd) /* one-shot: hand the board MCLK rate to the Taiko codec */
+{
+	int ret = 0;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_card *card = rtd->card;
+	struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+
+	if (cdc_mclk_init == true) /* already programmed once; skip */
+		return 0;
+	ret = snd_soc_dai_set_sysclk(codec_dai, TAIKO_MCLK_ID, pdata->mclk_freq,
+			SND_SOC_CLOCK_IN); /* codec consumes MCLK (clock input) */
+	if (ret < 0) {
+		pr_err("%s: Set codec sys clk failed %x", __func__, ret);
+		return ret;
+	}
+	cdc_mclk_init = true;
+	return 0;
+}
+
+static int mdm9625_mi2s_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, /* pin BE playback to 48 kHz / user-selected channels */
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+	SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_CHANNELS);
+	rate->min = rate->max = 48000; /* MI2S backend fixed at 48 kHz */
+	channels->min = channels->max = mdm9625_mi2s_rx_ch; /* from "MI2S_RX Channels" kcontrol */
+	set_codec_mclk(rtd);
+	return 0;
+}
+
+static int mdm9625_mi2s_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, /* pin BE capture to 48 kHz / user-selected channels */
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_CHANNELS);
+	rate->min = rate->max = 48000; /* MI2S backend fixed at 48 kHz */
+	channels->min = channels->max = mdm9625_mi2s_tx_ch; /* from "MI2S_TX Channels" kcontrol */
+	set_codec_mclk(rtd);
+	return 0;
+}
+
+
+static int mdm9625_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol, /* kcontrol get: report RX channels as 0-based enum index */
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: mdm9625_mi2s_rx_ch = %d\n", __func__, /* was "msm9615_i2s_rx_ch" — stale copy-paste name */
+		 mdm9625_mi2s_rx_ch);
+	ucontrol->value.integer.value[0] = mdm9625_mi2s_rx_ch - 1; /* enum index 0 = "One" */
+	return 0;
+}
+
+static int mdm9625_mi2s_rx_ch_put(struct snd_kcontrol *kcontrol, /* kcontrol put: store RX channels from 0-based enum index */
+	struct snd_ctl_elem_value *ucontrol)
+{
+	mdm9625_mi2s_rx_ch = ucontrol->value.integer.value[0] + 1;
+	pr_debug("%s: mdm9625_mi2s_rx_ch = %d\n", __func__, /* was "msm9615_i2s_rx_ch" — stale copy-paste name */
+		 mdm9625_mi2s_rx_ch);
+	return 1; /* value changed: notify */
+}
+
+static int mdm9625_mi2s_tx_ch_get(struct snd_kcontrol *kcontrol, /* kcontrol get: report TX channels as 0-based enum index */
+	struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: mdm9625_mi2s_tx_ch = %d\n", __func__, /* was "msm9615_i2s_tx_ch" — stale copy-paste name */
+		 mdm9625_mi2s_tx_ch);
+	ucontrol->value.integer.value[0] = mdm9625_mi2s_tx_ch - 1; /* enum index 0 = "One" */
+	return 0;
+}
+
+static int mdm9625_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol, /* kcontrol put: store TX channels from 0-based enum index */
+	struct snd_ctl_elem_value *ucontrol)
+{
+	mdm9625_mi2s_tx_ch = ucontrol->value.integer.value[0] + 1;
+	pr_debug("%s: mdm9625_mi2s_tx_ch = %d\n", __func__, /* was "msm9615_i2s_tx_ch" — stale copy-paste name */
+		 mdm9625_mi2s_tx_ch);
+	return 1; /* value changed: notify */
+}
+
+
+static int mdm9625_mi2s_get_spk(struct snd_kcontrol *kcontrol, /* "Speaker Function" get: report current spk state */
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	ucontrol->value.integer.value[0] = msm_spk_control;
+	return 0;
+}
+
+static void mdm_ext_control(struct snd_soc_codec *codec) /* apply msm_spk_control to the four external speaker pins */
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	mutex_lock(&dapm->codec->mutex); /* serialize pin state changes on this codec */
+	if (msm_spk_control == MDM9625_SPK_ON) {
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+	} else {
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg");
+	}
+	snd_soc_dapm_sync(dapm); /* pin changes take effect only after sync */
+	mutex_unlock(&dapm->codec->mutex);
+}
+
+static int mdm9625_mi2s_set_spk(struct snd_kcontrol *kcontrol, /* "Speaker Function" put: toggle ext speakers */
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	pr_debug("%s()\n", __func__);
+	if (msm_spk_control == ucontrol->value.integer.value[0]) /* no change: skip DAPM work */
+		return 0;
+	msm_spk_control = ucontrol->value.integer.value[0];
+	mdm_ext_control(codec);
+	return 1; /* value changed: notify */
+}
+
+static int mdm9625_enable_codec_ext_clk(struct snd_soc_codec *codec, /* gate the Taiko codec MCLK under a mutex */
+			int enable, bool dapm)
+{
+	int ret = 0;
+	pr_debug("%s: enable = %d  codec name %s\n", __func__,
+		 enable, codec->name);
+	mutex_lock(&cdc_mclk_mutex); /* serialize concurrent MCLK on/off requests */
+	if (enable)
+		taiko_mclk_enable(codec, 1, dapm);
+	else
+		taiko_mclk_enable(codec, 0, dapm);
+	mutex_unlock(&cdc_mclk_mutex);
+	return ret; /* always 0; taiko_mclk_enable result is not propagated */
+}
+
+static int mdm9625_mclk_event(struct snd_soc_dapm_widget *w, /* DAPM "MCLK" supply: clock on before power-up, off after power-down */
+			      struct snd_kcontrol *kcontrol, int event)
+{
+	pr_debug("%s: event = %d\n", __func__, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return mdm9625_enable_codec_ext_clk(w->codec, 1, true);
+	case SND_SOC_DAPM_POST_PMD:
+		return mdm9625_enable_codec_ext_clk(w->codec, 0, true);
+	}
+	return 0;
+}
+
+
+static const struct snd_soc_dapm_widget mdm9625_dapm_widgets[] = { /* machine-level DAPM widgets: MCLK supply, speakers, mics */
+
+	SND_SOC_DAPM_SUPPLY("MCLK",  SND_SOC_NOPM, 0, 0,
+	mdm9625_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), /* codec MCLK gated via mdm9625_mclk_event */
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Pos", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Neg", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Top Pos", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Top Neg", NULL),
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic3", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic6", NULL),
+};
+
+static const char *const spk_function[] = {"Off", "On"}; /* enum texts for "Speaker Function" */
+static const char *const mi2s_rx_ch_text[] = {"One", "Two"}; /* enum texts map to 1/2 channels */
+static const char *const mi2s_tx_ch_text[] = {"One", "Two"};
+
+static const struct soc_enum mdm9625_enum[] = { /* order must match mdm_snd_controls indices below */
+	SOC_ENUM_SINGLE_EXT(2, spk_function),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_tx_ch_text),
+};
+
+static const struct snd_kcontrol_new mdm_snd_controls[] = { /* userspace mixer controls registered in audrx_init */
+	SOC_ENUM_EXT("Speaker Function",   mdm9625_enum[0],
+				 mdm9625_mi2s_get_spk,
+				 mdm9625_mi2s_set_spk),
+	SOC_ENUM_EXT("MI2S_RX Channels",   mdm9625_enum[1],
+				 mdm9625_mi2s_rx_ch_get,
+				 mdm9625_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("MI2S_TX Channels",   mdm9625_enum[2],
+				 mdm9625_mi2s_tx_ch_get,
+				 mdm9625_mi2s_tx_ch_put),
+};
+
+static int mdm9625_mi2s_audrx_init(struct snd_soc_pcm_runtime *rtd) /* one-time BE init: controls, widgets, clocks, MBHC */
+{
+	int err;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	pr_info("%s(), dev_name%s\n", __func__, dev_name(cpu_dai->dev));
+
+	rtd->pmdown_time = 0; /* no delayed power-down on this link */
+	err = snd_soc_add_codec_controls(codec, mdm_snd_controls,
+					 ARRAY_SIZE(mdm_snd_controls));
+	if (err < 0)
+		return err;
+
+	snd_soc_dapm_new_controls(dapm, mdm9625_dapm_widgets,
+				  ARRAY_SIZE(mdm9625_dapm_widgets));
+
+	/* After DAPM Enable pins alawys
+	 * DAPM SYNC needs to be called.
+	 */
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+	snd_soc_dapm_sync(dapm);
+
+	/* start mbhc */
+	mdm9625_set_mi2s_gpio(); /* MBHC needs MI2S gpios/clocks up before detection starts */
+	mdm9625_mi2s_clk_ctl(rtd, true); /* NOTE(review): return value ignored — confirm intentional */
+	mbhc_cfg.calibration = def_taiko_mbhc_cal();
+	if (mbhc_cfg.calibration)
+		err = taiko_hs_detect(codec, &mbhc_cfg);
+	else
+		err = -ENOMEM; /* calibration alloc failed */
+	return err;
+}
+
+void *def_taiko_mbhc_cal(void) /* build default Taiko MBHC calibration blob; caller owns (kfree) the result */
+{
+	void *taiko_cal;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
+	u16 *btn_low, *btn_high;
+	u8 *n_ready, *n_cic, *gain;
+
+	taiko_cal = kzalloc(WCD9XXX_MBHC_CAL_SIZE(WCD9XXX_MBHC_DEF_BUTTONS,
+						WCD9XXX_MBHC_DEF_RLOADS),
+			    GFP_KERNEL);
+	if (!taiko_cal) {
+		pr_err("%s: out of memory\n", __func__);
+		return NULL;
+	}
+
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_GENERAL_PTR(taiko_cal)->X) = (Y)) /* general-section setter */
+	S(t_ldoh, 100);
+	S(t_bg_fast_settle, 100);
+	S(t_shutdown_plug_rem, 255);
+	S(mbhc_nsa, 4);
+	S(mbhc_navg, 4);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_DET_PTR(taiko_cal)->X) = (Y)) /* plug-detect-section setter */
+	S(mic_current, TAIKO_PID_MIC_5_UA);
+	S(hph_current, TAIKO_PID_MIC_5_UA);
+	S(t_mic_pid, 100);
+	S(t_ins_complete, 250);
+	S(t_ins_retry, 200);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(taiko_cal)->X) = (Y)) /* plug-type-section setter */
+	S(v_no_mic, 30);
+	S(v_hs_max, 2400);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal)->X) = (Y)) /* button-detect-section setter */
+	S(c[0], 62);
+	S(c[1], 124);
+	S(nc, 1);
+	S(n_meas, 3);
+	S(mbhc_nsc, 11);
+	S(n_btn_meas, 1);
+	S(n_btn_con, 2);
+	S(num_btn, WCD9XXX_MBHC_DEF_BUTTONS);
+	S(v_btn_press_delta_sta, 100);
+	S(v_btn_press_delta_cic, 50);
+#undef S
+	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal);
+	btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_V_BTN_LOW);
+	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg,
+					       MBHC_BTN_DET_V_BTN_HIGH);
+	btn_low[0] = -50; /* NOTE(review): -50 stored into u16 wraps to 65486 — confirm consumer treats this as signed */
+	btn_high[0] = 10;
+	btn_low[1] = 11;
+	btn_high[1] = 52;
+	btn_low[2] = 53;
+	btn_high[2] = 94;
+	btn_low[3] = 95;
+	btn_high[3] = 133;
+	btn_low[4] = 134;
+	btn_high[4] = 171;
+	btn_low[5] = 172;
+	btn_high[5] = 208;
+	btn_low[6] = 209;
+	btn_high[6] = 244;
+	btn_low[7] = 245;
+	btn_high[7] = 330;
+	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
+	n_ready[0] = 80;
+	n_ready[1] = 68;
+	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_CIC);
+	n_cic[0] = 60;
+	n_cic[1] = 47;
+	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_GAIN);
+	gain[0] = 11;
+	gain[1] = 9;
+
+	return taiko_cal;
+}
+
+
+static struct snd_soc_ops mdm9625_mi2s_be_ops = { /* shared ops for both MI2S backend links */
+	.startup = mdm9625_mi2s_startup,
+	.shutdown = mdm9625_mi2s_snd_shutdown,
+};
+
+/* Digital audio interface connects codec <---> CPU */
+/* Digital audio interface connects codec <---> CPU */
+static struct snd_soc_dai_link mdm9625_dai[] = { /* 4 frontend (dynamic) links + 2 MI2S backend links */
+	/* FrontEnd DAI Links */
+	{
+		.name = "MDM9625 Media1",
+		.stream_name = "MultiMedia1",
+		.cpu_dai_name = "MultiMedia1",
+		.platform_name = "msm-pcm-dsp",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* This dainlink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+	},
+	{
+		.name = "MSM VoIP",
+		.stream_name = "VoIP",
+		.cpu_dai_name = "VoIP",
+		.platform_name = "msm-voip-dsp",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* This dainlink has VOIP support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_VOIP,
+	},
+	{
+		.name = "Circuit-Switch Voice",
+		.stream_name = "CS-Voice",
+		.cpu_dai_name   = "CS-VOICE",
+		.platform_name  = "msm-pcm-voice",
+		.dynamic = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		/* This dainlink has Voice support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_CS_VOICE,
+	},
+	{
+		.name = "MI2S Hostless",
+		.stream_name = "MI2S Hostless",
+		.cpu_dai_name = "MI2S_TX_HOSTLESS",
+		.platform_name  = "msm-pcm-hostless",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		/* This dainlink has MI2S support */
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	/* Backend DAI Links */
+	{
+		.name = LPASS_BE_MI2S_RX,
+		.stream_name = "MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "taiko_codec",
+		.codec_dai_name = "taiko_i2s_rx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_MI2S_RX,
+		.init  = &mdm9625_mi2s_audrx_init, /* only RX link carries the one-time init */
+		.be_hw_params_fixup = &mdm9625_mi2s_rx_be_hw_params_fixup,
+		.ops = &mdm9625_mi2s_be_ops,
+	},
+	{
+		.name = LPASS_BE_MI2S_TX,
+		.stream_name = "MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "taiko_codec",
+		.codec_dai_name = "taiko_i2s_tx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_MI2S_TX,
+		.be_hw_params_fixup = &mdm9625_mi2s_tx_be_hw_params_fixup,
+		.ops = &mdm9625_mi2s_be_ops,
+	},
+};
+
+static struct snd_soc_card snd_soc_card_mdm9625 = { /* card name may be overridden by "qcom,model" in probe */
+	.name = "mdm9625-taiko-i2s-snd-card",
+	.dai_link = mdm9625_dai,
+	.num_links = ARRAY_SIZE(mdm9625_dai),
+};
+
+static __devinit int mdm9625_asoc_machine_probe(struct platform_device *pdev) /* DT probe: parse card props, validate mclk, register card */
+{
+	int ret;
+	struct snd_soc_card *card = &snd_soc_card_mdm9625;
+	struct mdm9625_machine_data *pdata;
+
+	mutex_init(&cdc_mclk_mutex);
+	gpio_enable = false;
+	cdc_mclk_init = false;
+	if (!pdev->dev.of_node) { /* driver is DT-only */
+		dev_err(&pdev->dev, "No platform supplied from device tree\n");
+		return -EINVAL;
+	}
+	pdata = devm_kzalloc(&pdev->dev, sizeof(struct mdm9625_machine_data),
+			     GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Can't allocate mdm9625_machine_data\n"); /* was "msm8974_asoc_mach_data" — wrong struct name */
+		ret = -ENOMEM;
+		goto err;
+	}
+	card->dev = &pdev->dev;
+	platform_set_drvdata(pdev, card);
+	snd_soc_card_set_drvdata(card, pdata);
+	ret = snd_soc_of_parse_card_name(card, "qcom,model");
+	if (ret)
+		goto err;
+	ret = snd_soc_of_parse_audio_routing(card, "qcom,audio-routing");
+	if (ret)
+		goto err;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,taiko-mclk-clk-freq",
+				   &pdata->mclk_freq);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Looking up %s property in node %s failed",
+			"qcom,taiko-mclk-clk-freq",
+			pdev->dev.of_node->full_name);
+		goto err;
+	}
+	/* At present only 12.288MHz is supported on MDM. */
+	if (pdata->mclk_freq != MDM_MCLK_CLK_12P288MHZ) {
+		dev_err(&pdev->dev, "unsupported taiko mclk freq %u\n",
+			pdata->mclk_freq);
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = snd_soc_register_card(card);
+	if (ret) {
+		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+			ret);
+		goto err;
+	}
+	return 0;
+err:
+	devm_kfree(&pdev->dev, pdata); /* NOTE(review): pdata is NULL on the alloc-failure path — confirm devm_kfree(NULL) is safe here */
+	return ret;
+}
+
+static int __devexit mdm9625_asoc_machine_remove(struct platform_device *pdev) /* unwind probe: unregister card (pdata is devm-managed) */
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+	pdata->mclk_freq = 0;
+	snd_soc_unregister_card(card);
+	return 0;
+}
+
+static const struct of_device_id msm9625_asoc_machine_of_match[] = { /* DT match table; bound via .of_match_table below */
+	{ .compatible = "qcom,mdm9625-audio-taiko", },
+	{},
+};
+
+static struct platform_driver msm9625_asoc_machine_driver = {
+	.driver = {
+		.name = MDM9625_MACHINE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &snd_soc_pm_ops, /* generic ASoC suspend/resume */
+		.of_match_table = msm9625_asoc_machine_of_match,
+	},
+	.probe = mdm9625_asoc_machine_probe,
+	.remove = __devexit_p(mdm9625_asoc_machine_remove),
+};
+
+
+module_platform_driver(msm9625_asoc_machine_driver); /* registers driver at module init, unregisters at exit */
+
+MODULE_DESCRIPTION("ALSA SoC msm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" MDM9625_MACHINE_DRV_NAME);
+MODULE_DEVICE_TABLE(of, msm9625_asoc_machine_of_match);
+
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 90c96b4..d0bfb76 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -925,7 +925,6 @@
clk_put(mi2s_bit_clk);
mi2s_bit_clk = NULL;
}
- msm_mi2s_free_gpios();
}
static int configure_mi2s_gpio(void)
@@ -958,7 +957,6 @@
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- configure_mi2s_gpio();
mi2s_bit_clk = clk_get(cpu_dai->dev, "bit_clk");
if (IS_ERR(mi2s_bit_clk))
return PTR_ERR(mi2s_bit_clk);
@@ -1138,7 +1136,6 @@
clk_put(sec_i2s_rx_osr_clk);
sec_i2s_rx_osr_clk = NULL;
}
- mpq8064_sec_i2s_rx_free_gpios();
}
pr_info("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -1177,7 +1174,6 @@
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- configure_sec_i2s_rx_gpio();
sec_i2s_rx_osr_clk = clk_get(cpu_dai->dev, "osr_clk");
if (IS_ERR(sec_i2s_rx_osr_clk)) {
pr_err("Failed to get sec_i2s_rx_osr_clk\n");
@@ -1695,7 +1691,8 @@
kfree(mbhc_cfg.calibration);
return ret;
}
-
+ configure_sec_i2s_rx_gpio();
+ configure_mi2s_gpio();
return ret;
}
@@ -1707,6 +1704,8 @@
pr_err("%s: Not the right machine type\n", __func__);
return ;
}
+ mpq8064_sec_i2s_rx_free_gpios();
+ msm_mi2s_free_gpios();
platform_device_unregister(msm_snd_device);
kfree(mbhc_cfg.calibration);
}
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 23eee9d..79ce671 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -438,7 +438,6 @@
mutex_lock(&routing_lock);
- adm_pseudo_close(PSEUDOPORT_01);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (!is_be_dai_extproc(i) &&
(msm_bedais[i].active) &&
@@ -480,8 +479,12 @@
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
- adm_close(msm_bedais[i].port_id);
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ if (msm_bedais[i].port_id == PSEUDOPORT_01)
+ adm_pseudo_close(msm_bedais[i].port_id);
+ else
+ adm_close(msm_bedais[i].port_id);
+ }
}
fe_dai_map[fedai_id][session_type] = INVALID_SESSION;
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 6cabc97..2d44a41 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -1790,6 +1790,9 @@
goto fail_cmd;
}
pr_debug("%s: port_id=%d\n", __func__, port_id);
+ if ((port_id == RT_PROXY_DAI_001_RX) ||
+ (port_id == RT_PROXY_DAI_002_TX))
+ return 0;
port_id = afe_convert_virtual_to_portid(port_id);
stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index cde5b02..1dfa605 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -1765,6 +1765,9 @@
case FORMAT_AMRWB:
open.write_format = AMRWB_FS;
break;
+ case FORMAT_AMR_WB_PLUS:
+ open.write_format = AMR_WB_PLUS;
+ break;
case FORMAT_V13K:
open.write_format = V13K_FS;
break;
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 9e08be3..f151e51 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -30,7 +30,7 @@
#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
-static int enable_ocmem_audio_voice;
+static int enable_ocmem_audio_voice = 1;
module_param(enable_ocmem_audio_voice, int,
S_IRUGO | S_IWUSR | S_IWGRP);
MODULE_PARM_DESC(enable_ocmem_audio_voice, "control OCMEM usage for audio/voice");
@@ -423,10 +423,10 @@
struct voice_ocmem_workdata *workdata = NULL;
if (enable) {
- if (enable_ocmem_audio_voice)
- audio_ocmem_lcl.ocmem_en = true;
- else
+ if (!enable_ocmem_audio_voice)
audio_ocmem_lcl.ocmem_en = false;
+ else
+ audio_ocmem_lcl.ocmem_en = true;
}
if (audio_ocmem_lcl.ocmem_en) {
if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
@@ -527,10 +527,10 @@
struct audio_ocmem_workdata *workdata = NULL;
if (enable) {
- if (enable_ocmem_audio_voice)
- audio_ocmem_lcl.ocmem_en = true;
- else
+ if (!enable_ocmem_audio_voice)
audio_ocmem_lcl.ocmem_en = false;
+ else
+ audio_ocmem_lcl.ocmem_en = true;
}
if (audio_ocmem_lcl.ocmem_en) {
@@ -611,7 +611,7 @@
atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_DEFAULT);
atomic_set(&audio_ocmem_lcl.audio_exit, 0);
spin_lock_init(&audio_ocmem_lcl.audio_lock);
- audio_ocmem_lcl.ocmem_en = false;
+ audio_ocmem_lcl.ocmem_en = true;
/* populate platform data */
ret = audio_ocmem_platform_data_populate(pdev);
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 7ba6514..94c1c85 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -457,6 +457,7 @@
pr_debug("%s: Trigger start\n", __func__);
q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
atomic_set(&prtd->start, 1);
+ atomic_set(&prtd->pending_buffer, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 621d24b..a307bcc 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -1310,6 +1310,16 @@
switch (mi2s_id) {
case MSM_PRIM_MI2S:
*port_id = MI2S_RX;
+ break;
+ case MSM_SEC_MI2S:
+ *port_id = AFE_PORT_ID_SECONDARY_MI2S_RX;
+ break;
+ case MSM_TERT_MI2S:
+ *port_id = AFE_PORT_ID_TERTIARY_MI2S_RX;
+ break;
+ case MSM_QUAT_MI2S:
+ *port_id = AFE_PORT_ID_QUATERNARY_MI2S_RX;
+ break;
break;
default:
ret = -1;
@@ -1320,7 +1330,16 @@
switch (mi2s_id) {
case MSM_PRIM_MI2S:
*port_id = MI2S_TX;
- break;
+ break;
+ case MSM_SEC_MI2S:
+ *port_id = AFE_PORT_ID_SECONDARY_MI2S_TX;
+ break;
+ case MSM_TERT_MI2S:
+ *port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+ break;
+ case MSM_QUAT_MI2S:
+ *port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+ break;
default:
ret = -1;
break;
@@ -1330,7 +1349,7 @@
ret = -1;
break;
}
- pr_debug("%s: port_id = %x\n", __func__, *port_id);
+ pr_debug("%s: port_id = %#x\n", __func__, *port_id);
return ret;
}
@@ -1346,15 +1365,17 @@
u16 port_id = 0;
int rc = 0;
- dev_dbg(dai->dev, "%s: device name %s dai id %x,port id = %x\n",
- __func__, dai->name, dai->id, port_id);
-
if (msm_mi2s_get_port_id(dai->id, substream->stream,
&port_id) != 0) {
- dev_err(dai->dev, "%s: Invalid Port ID\n", __func__);
+ dev_err(dai->dev, "%s: Invalid Port ID %#x\n",
+ __func__, port_id);
return -EINVAL;
}
+ dev_dbg(dai->dev, "%s: dai id %d, afe port id = %x\n"
+ "dai_data->channels = %u sample_rate = %u\n", __func__,
+ dai->id, port_id, dai_data->channels, dai_data->rate);
+
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
/* PORT START should be set if prepare called
* in active state.
@@ -1382,6 +1403,8 @@
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
+ struct afe_param_id_i2s_cfg *i2s = &dai_data->port_config.i2s;
+
dai_data->channels = params_channels(params);
switch (dai_data->channels) {
@@ -1451,13 +1474,19 @@
mi2s_dai_data->bitwidth_constraint.list = &dai_data->bitwidth;
}
- pr_debug("%s: dai_data->channels = %d, line = %d\n"
- ",mono_stereo =%x sample rate = %x\n", __func__,
- dai_data->channels, dai_data->port_config.i2s.channel_mode,
- dai_data->port_config.i2s.mono_stereo, dai_data->rate);
+ dev_dbg(dai->dev, "%s: dai id %d dai_data->channels = %d\n"
+ "sample_rate = %u i2s_cfg_minor_version = %#x\n"
+ "bit_width = %hu channel_mode = %#x mono_stereo = %#x\n"
+ "ws_src = %#x sample_rate = %u data_format = %#x\n"
+ "reserved = %u\n", __func__, dai->id, dai_data->channels,
+ dai_data->rate, i2s->i2s_cfg_minor_version, i2s->bit_width,
+ i2s->channel_mode, i2s->mono_stereo, i2s->ws_src,
+ i2s->sample_rate, i2s->data_format, i2s->reserved);
+
return 0;
+
error_invalid_data:
- pr_debug("%s: dai_data->channels = %d, line = %d\n", __func__,
+ pr_debug("%s: dai_data->channels = %d channel_mode = %d\n", __func__,
dai_data->channels, dai_data->port_config.i2s.channel_mode);
return -EINVAL;
}
@@ -1507,11 +1536,12 @@
if (msm_mi2s_get_port_id(dai->id, substream->stream,
&port_id) != 0) {
- dev_err(dai->dev, "%s: Invalid Port ID\n", __func__);
+ dev_err(dai->dev, "%s: Invalid Port ID %#x\n",
+ __func__, port_id);
}
- dev_dbg(dai->dev, "%s: device name %s port id = %x\n",
- __func__, dai->name, port_id);
+ dev_dbg(dai->dev, "%s: closing afe port id = %x\n",
+ __func__, port_id);
if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
rc = afe_close(port_id);
@@ -1714,20 +1744,21 @@
if (rc) {
dev_err(&pdev->dev,
"%s: missing %x in dt node\n", __func__, mi2s_intf);
- return rc;
+ return rc;
}
- if (mi2s_intf > MSM_QUAD_MI2S) {
- dev_err(&pdev->dev, "%s: Invalid MI2S ID from Device Tree\n",
- __func__);
- return -EINVAL;
+ dev_dbg(&pdev->dev, "dev name %s dev id %x\n", dev_name(&pdev->dev),
+ mi2s_intf);
+
+ if (mi2s_intf < MSM_PRIM_MI2S || mi2s_intf > MSM_QUAT_MI2S) {
+ dev_err(&pdev->dev,
+ "%s: Invalid MI2S ID %u from Device Tree\n",
+ __func__, mi2s_intf);
+ return -ENXIO;
}
- if (mi2s_intf == MSM_PRIM_MI2S) {
- dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-mi2s",
- MSM_PRIM_MI2S);
- pdev->id = MSM_PRIM_MI2S;
- }
+ dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-mi2s", mi2s_intf);
+ pdev->id = mi2s_intf;
mi2s_pdata = kzalloc(sizeof(struct msm_mi2s_pdata), GFP_KERNEL);
if (!mi2s_pdata) {
@@ -1736,9 +1767,6 @@
goto rtn;
}
- dev_dbg(&pdev->dev, "dev name %s dev id %x\n", dev_name(&pdev->dev),
- pdev->id);
-
rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-rx-lines",
&rx_line);
if (rc) {
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 2e0c229..17c18dd 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -142,6 +142,8 @@
{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0},
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0},
};
@@ -943,6 +945,21 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX ,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -1043,6 +1060,9 @@
SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -1623,8 +1643,13 @@
SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ 0, 0, 0, 0),
+
SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("MI2S_TX", "MI2S Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback",
0, 0, 0 , 0),
@@ -1684,6 +1709,9 @@
hdmi_mixer_controls, ARRAY_SIZE(hdmi_mixer_controls)),
SND_SOC_DAPM_MIXER("MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
mi2s_rx_mixer_controls, ARRAY_SIZE(mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quaternary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0,
mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
@@ -1827,9 +1855,16 @@
{"MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
+ {"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"},
+
{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
{"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -2006,8 +2041,11 @@
{"BE_OUT", NULL, "SLIMBUS_4_RX"},
{"BE_OUT", NULL, "HDMI"},
{"BE_OUT", NULL, "MI2S_RX"},
+ {"BE_OUT", NULL, "QUAT_MI2S_RX"},
+
{"PRI_I2S_TX", NULL, "BE_IN"},
{"MI2S_TX", NULL, "BE_IN"},
+ {"QUAT_MI2S_TX", NULL, "BE_IN"},
{"SLIMBUS_0_TX", NULL, "BE_IN" },
{"SLIMBUS_1_TX", NULL, "BE_IN" },
{"SLIMBUS_3_TX", NULL, "BE_IN" },
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 261c359..be646ed 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -33,6 +33,8 @@
#define LPASS_BE_MI2S_RX "MI2S_RX"
#define LPASS_BE_MI2S_TX "MI2S_TX"
+#define LPASS_BE_QUAT_MI2S_RX "QUAT_MI2S_RX"
+#define LPASS_BE_QUAT_MI2S_TX "QUAT_MI2S_TX"
#define LPASS_BE_STUB_RX "STUB_RX"
#define LPASS_BE_STUB_TX "STUB_TX"
#define LPASS_BE_SLIMBUS_1_RX "SLIMBUS_1_RX"
@@ -95,6 +97,8 @@
MSM_BACKEND_DAI_EXTPROC_RX,
MSM_BACKEND_DAI_EXTPROC_TX,
MSM_BACKEND_DAI_EXTPROC_EC_TX,
+ MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_BACKEND_DAI_MAX,
};
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 6acc136..7267a82 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -34,11 +34,11 @@
struct adm_ctl {
void *apr;
- atomic_t copp_id[Q6_AFE_MAX_PORTS];
- atomic_t copp_cnt[Q6_AFE_MAX_PORTS];
- atomic_t copp_stat[Q6_AFE_MAX_PORTS];
- u32 mem_map_handle[Q6_AFE_MAX_PORTS];
- wait_queue_head_t wait[Q6_AFE_MAX_PORTS];
+ atomic_t copp_id[AFE_MAX_PORTS];
+ atomic_t copp_cnt[AFE_MAX_PORTS];
+ atomic_t copp_stat[AFE_MAX_PORTS];
+ u32 mem_map_handle[AFE_MAX_PORTS];
+ wait_queue_head_t wait[AFE_MAX_PORTS];
};
static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
@@ -81,7 +81,7 @@
this_adm.apr);
if (this_adm.apr) {
apr_reset(this_adm.apr);
- for (i = 0; i < Q6_AFE_MAX_PORTS; i++) {
+ for (i = 0; i < AFE_MAX_PORTS; i++) {
atomic_set(&this_adm.copp_id[i],
RESET_COPP_ID);
atomic_set(&this_adm.copp_cnt[i], 0);
@@ -107,7 +107,7 @@
adm_callback_debug_print(data);
if (data->payload_size) {
index = q6audio_get_port_index(data->token);
- if (index < 0 || index >= Q6_AFE_MAX_PORTS) {
+ if (index < 0 || index >= AFE_MAX_PORTS) {
pr_err("%s: invalid port idx %d token %d\n",
__func__, index, data->token);
return 0;
@@ -120,10 +120,10 @@
}
switch (payload[0]) {
case ADM_CMD_SET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
+ __func__);
if (rtac_make_adm_callback(
payload, data->payload_size)) {
- pr_debug("%s: payload[0]: 0x%x\n",
- __func__, payload[0]);
break;
}
case ADM_CMD_DEVICE_CLOSE_V5:
@@ -148,6 +148,20 @@
wake_up(&this_adm.wait[index]);
}
break;
+ case ADM_CMD_GET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* ADM_CMDRSP_GET_PP_PARAMS_V5 */
+ if (payload[1] != 0) {
+ pr_err("%s: ADM get param error = %d, resuming\n",
+ __func__, payload[1]);
+ rtac_make_adm_callback(payload,
+ data->payload_size);
+ }
+ break;
default:
pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
payload[0]);
@@ -174,8 +188,11 @@
wake_up(&this_adm.wait[index]);
}
break;
- case ADM_CMD_GET_PP_PARAMS_V5:
- pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n", __func__);
+ case ADM_CMDRSP_GET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
+ if (payload[0] != 0)
+ pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
+ __func__, payload[0]);
rtac_make_adm_callback(payload,
data->payload_size);
break;
@@ -202,15 +219,15 @@
struct adm_cmd_set_pp_params_v5 adm_params;
int index = afe_get_port_index(port_id);
if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: invalid port idx %d portid %d\n",
+ pr_err("%s: invalid port idx %d portid %#x\n",
__func__, index, port_id);
return 0;
}
- pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
+ pr_debug("%s: Port id %#x, index %d\n", __func__, port_id, index);
if (!aud_cal || aud_cal->cal_size == 0) {
- pr_debug("%s: No ADM cal to send for port_id = %d!\n",
+ pr_debug("%s: No ADM cal to send for port_id = %#x!\n",
__func__, port_id);
result = -EINVAL;
goto done;
@@ -240,7 +257,7 @@
adm_params.payload_size);
result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
if (result < 0) {
- pr_err("%s: Set params failed port = %d payload = 0x%x\n",
+ pr_err("%s: Set params failed port = %#x payload = 0x%x\n",
__func__, port_id, aud_cal->cal_paddr);
result = -EINVAL;
goto done;
@@ -250,7 +267,7 @@
atomic_read(&this_adm.copp_stat[index]),
msecs_to_jiffies(TIMEOUT_MS));
if (!result) {
- pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
+ pr_err("%s: Set params timed out port = %#x, payload = 0x%x\n",
__func__, port_id, aud_cal->cal_paddr);
result = -EINVAL;
goto done;
@@ -300,10 +317,10 @@
}
if (!send_adm_cal_block(port_id, &aud_cal))
- pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
+ pr_debug("%s: Audproc cal sent for port id: %#x, path %d\n",
__func__, port_id, acdb_path);
else
- pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
+ pr_debug("%s: Audproc cal not sent for port id: %#x, path %d\n",
__func__, port_id, acdb_path);
pr_debug("%s: Sending audvol cal\n", __func__);
@@ -334,10 +351,10 @@
}
if (!send_adm_cal_block(port_id, &aud_cal))
- pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
+ pr_debug("%s: Audvol cal sent for port id: %#x, path %d\n",
__func__, port_id, acdb_path);
else
- pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
+ pr_debug("%s: Audvol cal not sent for port id: %#x, path %d\n",
__func__, port_id, acdb_path);
}
@@ -367,7 +384,7 @@
rtac_set_adm_handle(this_adm.apr);
}
index = afe_get_port_index(port_id);
- pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+ pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
@@ -388,7 +405,7 @@
atomic_set(&this_adm.copp_stat[index], 0);
ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
if (ret < 0) {
- pr_err("%s:ADM enable for port %d failed\n",
+ pr_err("%s:ADM enable for port %#x failed\n",
__func__, port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -398,7 +415,7 @@
atomic_read(&this_adm.copp_stat[index]),
msecs_to_jiffies(TIMEOUT_MS));
if (!ret) {
- pr_err("%s ADM connect AFE failed for port %d\n", __func__,
+ pr_err("%s ADM connect AFE failed for port %#x\n", __func__,
port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -418,18 +435,18 @@
int index;
int tmp_port = q6audio_get_port_id(port_id);
- pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
+ pr_debug("%s: port %#x path:%d rate:%d mode:%d\n", __func__,
port_id, path, rate, channel_mode);
port_id = q6audio_convert_virtual_to_portid(port_id);
if (q6audio_validate_port(port_id) < 0) {
- pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+ pr_err("%s port idi[%#x] is invalid\n", __func__, port_id);
return -ENODEV;
}
index = q6audio_get_port_index(port_id);
- pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+ pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);
if (this_adm.apr == NULL) {
this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
@@ -526,15 +543,15 @@
return -EINVAL;
}
- pr_debug("%s: port_id=%d rate=%d"
- "topology_id=0x%X\n", __func__, open.endpoint_id_1, \
- open.sample_rate, open.topology_id);
+ pr_debug("%s: port_id=%#x rate=%d topology_id=0x%X\n",
+ __func__, open.endpoint_id_1, open.sample_rate,
+ open.topology_id);
atomic_set(&this_adm.copp_stat[index], 0);
ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
if (ret < 0) {
- pr_err("%s:ADM enable for port %d for[%d] failed\n",
+ pr_err("%s:ADM enable for port %#x for[%d] failed\n",
__func__, tmp_port, port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -544,7 +561,7 @@
atomic_read(&this_adm.copp_stat[index]),
msecs_to_jiffies(TIMEOUT_MS));
if (!ret) {
- pr_err("%s ADM open failed for port %d"
+ pr_err("%s ADM open failed for port %#x"
"for [%d]\n", __func__, tmp_port, port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -582,7 +599,7 @@
/* Assumes port_ids have already been validated during adm_open */
int index = q6audio_get_port_index(copp_id);
- if (index < 0 || index >= Q6_AFE_MAX_PORTS) {
+ if (index < 0 || index >= AFE_MAX_PORTS) {
pr_err("%s: invalid port idx %d token %d\n",
__func__, index, copp_id);
return 0;
@@ -598,7 +615,7 @@
}
route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;
- pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0] :%d coppid[%d]\n",
+ pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
__func__, session_id, path, num_copps, port_id[0], copp_id);
route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -641,10 +658,10 @@
tmp = q6audio_get_port_index(port_id[i]);
- if (tmp >= 0 && tmp < Q6_AFE_MAX_PORTS)
+ if (tmp >= 0 && tmp < AFE_MAX_PORTS)
copps_list[i] =
atomic_read(&this_adm.copp_id[tmp]);
- pr_debug("%s: port_id[%d]: %d, index: %d act coppid[0x%x]\n",
+ pr_debug("%s: port_id[%#x]: %d, index: %d act coppid[0x%x]\n",
__func__, i, port_id[i], tmp,
atomic_read(&this_adm.copp_id[tmp]));
}
@@ -652,7 +669,7 @@
ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
if (ret < 0) {
- pr_err("%s: ADM routing for port %d failed\n",
+ pr_err("%s: ADM routing for port %#x failed\n",
__func__, port_id[0]);
ret = -EINVAL;
goto fail_cmd;
@@ -661,7 +678,7 @@
atomic_read(&this_adm.copp_stat[index]),
msecs_to_jiffies(TIMEOUT_MS));
if (!ret) {
- pr_err("%s: ADM cmd Route failed for port %d\n",
+ pr_err("%s: ADM cmd Route failed for port %#x\n",
__func__, port_id[0]);
ret = -EINVAL;
goto fail_cmd;
@@ -669,6 +686,11 @@
for (i = 0; i < num_copps; i++)
send_adm_cal(port_id[i], path);
+ for (i = 0; i < num_copps; i++)
+ rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
+ [afe_get_port_index(port_id[i])]),
+ path, session_id);
+
fail_cmd:
kfree(matrix_map);
return ret;
@@ -702,7 +724,7 @@
port_id = q6audio_convert_virtual_to_portid(port_id);
if (q6audio_validate_port(port_id) < 0) {
- pr_err("%s port id[%d] is invalid\n", __func__, port_id);
+ pr_err("%s port id[%#x] is invalid\n", __func__, port_id);
return -ENODEV;
}
@@ -842,10 +864,10 @@
if (q6audio_validate_port(port_id) < 0)
return -EINVAL;
- pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);
+ pr_debug("%s port_id=%#x index %d\n", __func__, port_id, index);
if (!(atomic_read(&this_adm.copp_cnt[index]))) {
- pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
+ pr_err("%s: copp count for port[%#x]is 0\n", __func__, port_id);
goto fail_cmd;
}
@@ -868,7 +890,7 @@
atomic_set(&this_adm.copp_stat[index], 0);
- pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
+ pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
__func__,
atomic_read(&this_adm.copp_id[index]),
port_id, index,
@@ -885,7 +907,7 @@
atomic_read(&this_adm.copp_stat[index]),
msecs_to_jiffies(TIMEOUT_MS));
if (!ret) {
- pr_err("%s: ADM cmd Route failed for port %d\n",
+ pr_err("%s: ADM cmd Route failed for port %#x\n",
__func__, port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -903,7 +925,7 @@
int i = 0;
this_adm.apr = NULL;
- for (i = 0; i < Q6_AFE_MAX_PORTS; i++) {
+ for (i = 0; i < AFE_MAX_PORTS; i++) {
atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
atomic_set(&this_adm.copp_cnt[i], 0);
atomic_set(&this_adm.copp_stat[i], 0);
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 8d8ff5d..de9841a 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -165,6 +165,7 @@
case INT_FM_RX:
case VOICE_PLAYBACK_TX:
case RT_PROXY_PORT_001_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
ret = MSM_AFE_PORT_TYPE_RX;
break;
@@ -183,12 +184,13 @@
case VOICE_RECORD_RX:
case INT_BT_SCO_TX:
case RT_PROXY_PORT_001_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
ret = MSM_AFE_PORT_TYPE_TX;
break;
default:
WARN_ON(1);
- pr_err("%s: invalid port id %d\n", __func__, port_id);
+ pr_err("%s: invalid port id %#x\n", __func__, port_id);
ret = -EINVAL;
}
@@ -283,10 +285,10 @@
(port_id == RT_PROXY_DAI_001_TX))
port_id = VIRTUAL_ID_TO_PORTID(port_id);
- pr_debug("%s: port id: %d\n", __func__, port_id);
+ pr_debug("%s: port id: %#x\n", __func__, port_id);
index = q6audio_get_port_index(port_id);
if (q6audio_validate_port(port_id) < 0) {
- pr_err("%s: port id: %d\n", __func__, port_id);
+ pr_err("%s: port id: %#x\n", __func__, port_id);
return -EINVAL;
}
@@ -295,7 +297,7 @@
return ret;
if (q6audio_validate_port(port_id) < 0) {
- pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+ pr_err("%s: Failed : Invalid Port id = %#x\n", __func__,
port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -321,6 +323,12 @@
case SECONDARY_I2S_TX:
case MI2S_RX:
case MI2S_TX:
+ case AFE_PORT_ID_SECONDARY_MI2S_RX:
+ case AFE_PORT_ID_SECONDARY_MI2S_TX:
+ case AFE_PORT_ID_TERTIARY_MI2S_RX:
+ case AFE_PORT_ID_TERTIARY_MI2S_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
cfg_type = AFE_PARAM_ID_I2S_CONFIG;
break;
case HDMI_RX:
@@ -370,7 +378,7 @@
atomic_set(&this_afe.status, 0);
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
if (ret < 0) {
- pr_err("%s: AFE enable for port %d failed\n", __func__,
+ pr_err("%s: AFE enable for port %#x failed\n", __func__,
port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -407,7 +415,7 @@
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
if (IS_ERR_VALUE(ret)) {
- pr_err("%s: AFE enable for port %d failed\n", __func__,
+ pr_err("%s: AFE enable for port %#x failed\n", __func__,
port_id);
ret = -EINVAL;
goto fail_cmd;
@@ -468,7 +476,10 @@
case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
-
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ return IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX;
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ return IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX;
default: return -EINVAL;
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 2d52c43..0ddaabe 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -932,6 +932,10 @@
__func__, payload[0], payload[1]);
if (data->opcode == APR_BASIC_RSP_RESULT) {
token = data->token;
+ if (payload[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, payload[0], payload[1]);
+ }
switch (payload[0]) {
case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
if (rtac_make_asm_callback(ac->session, payload,
@@ -965,6 +969,20 @@
ac->cb(data->opcode, data->token,
(uint32_t *)data->payload, ac->priv);
break;
+ case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
+ pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 */
+ if (payload[1] != 0) {
+ pr_err("%s: ASM get param error = %d, resuming\n",
+ __func__, payload[1]);
+ rtac_make_asm_callback(ac->session, payload,
+ data->payload_size);
+ }
+ break;
default:
pr_debug("%s:command[0x%x] not expecting rsp\n",
__func__, payload[0]);
@@ -1008,6 +1026,10 @@
break;
}
case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+ pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2\n", __func__);
+ if (payload[0] != 0)
+ pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n",
+ __func__, payload[0]);
rtac_make_asm_callback(ac->session, payload,
data->payload_size);
break;
@@ -2347,7 +2369,7 @@
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
- pr_info("%s: Found the element\n", __func__);
+ pr_debug("%s: Found the element\n", __func__);
mem_unmap.mem_map_handle = buf_node->mmap_hdl;
break;
}
@@ -2365,7 +2387,7 @@
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
if (!rc) {
- pr_err("timeout. waited for memory_map\n");
+ pr_err("timeout. waited for memory_unmap\n");
rc = -EINVAL;
goto fail_cmd;
}
@@ -2375,6 +2397,7 @@
if (buf_node->buf_addr_lsw == buf_add) {
list_del(&buf_node->list);
kfree(buf_node);
+ break;
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6audio-v2.c b/sound/soc/msm/qdsp6v2/q6audio-v2.c
index 8c524fa..033cb8e 100644
--- a/sound/soc/msm/qdsp6v2/q6audio-v2.c
+++ b/sound/soc/msm/qdsp6v2/q6audio-v2.c
@@ -48,7 +48,10 @@
case INT_FM_TX: return IDX_INT_FM_TX;
case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
-
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ return IDX_AFE_PORT_ID_QUATERNARY_MI2S_RX;
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ return IDX_AFE_PORT_ID_QUATERNARY_MI2S_TX;
default: return -EINVAL;
}
}
@@ -82,6 +85,10 @@
case INT_FM_TX: return AFE_PORT_ID_INTERNAL_FM_TX;
case RT_PROXY_PORT_001_RX: return AFE_PORT_ID_RT_PROXY_PORT_001_RX;
case RT_PROXY_PORT_001_TX: return AFE_PORT_ID_RT_PROXY_PORT_001_TX;
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ return AFE_PORT_ID_QUATERNARY_MI2S_RX;
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ return AFE_PORT_ID_QUATERNARY_MI2S_TX;
default: return -EINVAL;
}
@@ -138,6 +145,8 @@
case INT_FM_TX:
case RT_PROXY_PORT_001_RX:
case RT_PROXY_PORT_001_TX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_RX:
+ case AFE_PORT_ID_QUATERNARY_MI2S_TX:
{
ret = 0;
break;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 338cfe3..b799e59 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
#include <mach/socinfo.h>
+#include <mach/qdsp6v2/apr_tal.h>
#include "sound/apr_audio-v2.h"
#include "sound/q6afe-v2.h"
@@ -208,6 +209,8 @@
static int voice_apr_register(void)
{
+ void *modem_mvm, *modem_cvs, *modem_cvp;
+
pr_debug("%s\n", __func__);
mutex_lock(&common.common_lock);
@@ -224,6 +227,18 @@
pr_err("%s: Unable to register MVM\n", __func__);
goto err;
}
+
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_mvm = apr_register("MODEM", "MVM",
+ qdsp_mvm_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_mvm == NULL)
+ pr_err("%s: Unable to register MVM for MODEM\n",
+ __func__);
}
if (common.apr_q6_cvs == NULL) {
@@ -237,6 +252,18 @@
pr_err("%s: Unable to register CVS\n", __func__);
goto err;
}
+ rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvs = apr_register("MODEM", "CVS",
+ qdsp_cvs_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvs == NULL)
+ pr_err("%s: Unable to register CVS for MODEM\n",
+ __func__);
}
@@ -251,6 +278,18 @@
pr_err("%s: Unable to register CVP\n", __func__);
goto err;
}
+ rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvp = apr_register("MODEM", "CVP",
+ qdsp_cvp_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvp == NULL)
+ pr_err("%s: Unable to register CVP for MODEM\n",
+ __func__);
}
@@ -262,6 +301,7 @@
if (common.apr_q6_cvs != NULL) {
apr_deregister(common.apr_q6_cvs);
common.apr_q6_cvs = NULL;
+ rtac_set_voice_handle(RTAC_CVS, NULL);
}
if (common.apr_q6_mvm != NULL) {
apr_deregister(common.apr_q6_mvm);
@@ -605,8 +645,9 @@
cvs_handle = voice_get_cvs_handle(v);
/* MVM, CVS sessions are destroyed only for Full control sessions. */
- if (is_voip_session(v->session_id)) {
- pr_debug("%s: MVM detach stream\n", __func__);
+ if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__,
+ v->voc_state);
/* Detach voice stream. */
detach_stream.hdr.hdr_field =
@@ -2176,6 +2217,10 @@
if (v->rec_info.rec_enable)
voice_cvs_start_record(v, v->rec_info.rec_mode);
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
return 0;
@@ -2526,6 +2571,7 @@
goto fail;
}
+ rtac_remove_voice(voice_get_cvs_handle(v));
cvp_handle = 0;
voice_set_cvp_handle(v, cvp_handle);
return 0;
@@ -3281,6 +3327,7 @@
mutex_lock(&v->lock);
if (v->voc_state == VOC_RUN) {
+ rtac_remove_voice(voice_get_cvs_handle(v));
/* send cmd to dsp to disable vocproc */
ret = voice_send_disable_vocproc_cmd(v);
if (ret < 0) {
@@ -3324,32 +3371,36 @@
voice_send_cvp_register_cal_cmd(v);
voice_send_cvp_register_vol_cal_cmd(v);
- ret = voice_send_enable_vocproc_cmd(v);
- if (ret < 0) {
- pr_err("%s: enable vocproc failed %d\n", __func__, ret);
- goto fail;
- }
+ ret = voice_send_enable_vocproc_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: enable vocproc failed %d\n", __func__, ret);
+ goto fail;
+ }
- /* Send tty mode if tty device is used */
- voice_send_tty_mode_cmd(v);
+ /* Send tty mode if tty device is used */
+ voice_send_tty_mode_cmd(v);
- /* enable widevoice if wv_enable is set */
- if (v->wv_enable)
- voice_send_set_widevoice_enable_cmd(v);
+ /* enable widevoice if wv_enable is set */
+ if (v->wv_enable)
+ voice_send_set_widevoice_enable_cmd(v);
- /* enable slowtalk */
- if (v->st_enable)
- voice_send_set_pp_enable_cmd(v,
+ /* enable slowtalk */
+ if (v->st_enable)
+ voice_send_set_pp_enable_cmd(v,
MODULE_ID_VOICE_MODULE_ST,
v->st_enable);
- /* enable FENS */
- if (v->fens_enable)
- voice_send_set_pp_enable_cmd(v,
+ /* enable FENS */
+ if (v->fens_enable)
+ voice_send_set_pp_enable_cmd(v,
MODULE_ID_VOICE_MODULE_FENS,
v->fens_enable);
- v->voc_state = VOC_RUN;
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
+ v->voc_state = VOC_RUN;
}
fail:
@@ -3702,7 +3753,9 @@
mutex_lock(&v->lock);
- if (v->voc_state == VOC_RUN) {
+ if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
+
ret = voice_destroy_vocproc(v);
if (ret < 0)
pr_err("%s: destroy voice failed\n", __func__);
@@ -3727,6 +3780,13 @@
mutex_lock(&v->lock);
+ if (v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC in ERR state\n", __func__);
+
+ voice_destroy_mvm_cvs_session(v);
+ v->voc_state = VOC_INIT;
+ }
+
if ((v->voc_state == VOC_INIT) ||
(v->voc_state == VOC_RELEASE)) {
ret = voice_apr_register();
@@ -3817,6 +3877,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3825,6 +3886,36 @@
c = priv;
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received MODEM reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+ apr_reset(c->apr_q6_mvm);
+ c->apr_q6_mvm = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].mvm_handle = 0;
+ }
+ return 0;
+ }
+
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
v = voice_get_session(data->dest_port);
@@ -3834,23 +3925,6 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_mvm);
- c->apr_q6_mvm = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].mvm_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
@@ -3935,6 +4009,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3944,6 +4019,35 @@
c = priv;
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvs);
+ c->apr_q6_cvs = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvs_handle = 0;
+ }
+ return 0;
+ }
v = voice_get_session(data->dest_port);
if (v == NULL) {
@@ -3952,28 +4056,15 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvs);
- c->apr_q6_cvs = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvs_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
pr_info("%x %x\n", ptr[0], ptr[1]);
+ if (ptr[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, ptr[0], ptr[1]);
+ }
/*response from CVS */
switch (ptr[0]) {
case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
@@ -4010,6 +4101,24 @@
wake_up(&v->cvs_wait);
break;
case VOICE_CMD_SET_PARAM:
+ pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+ rtac_make_voice_callback(RTAC_CVS, ptr,
+ data->payload_size);
+ break;
+ case VOICE_CMD_GET_PARAM:
+ pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* VOICE_EVT_GET_PARAM_ACK */
+ if (ptr[1] != 0) {
+ pr_err("%s: CVP get param error = %d, resuming\n",
+ __func__, ptr[1]);
+ rtac_make_voice_callback(RTAC_CVP,
+ data->payload,
+ data->payload_size);
+ }
break;
default:
pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
@@ -4125,7 +4234,16 @@
pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
- } else
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ ptr = data->payload;
+ if (ptr[0] != 0) {
+ pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+ __func__, ptr[0]);
+ }
+ rtac_make_voice_callback(RTAC_CVS, data->payload,
+ data->payload_size);
+ } else
pr_err("Unknown opcode 0x%x\n", data->opcode);
fail:
@@ -4138,6 +4256,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -4146,6 +4265,33 @@
c = priv;
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvp);
+ c->apr_q6_cvp = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvp_handle = 0;
+ }
+ return 0;
+ }
+
v = voice_get_session(data->dest_port);
if (v == NULL) {
pr_err("%s: v is NULL\n", __func__);
@@ -4153,28 +4299,15 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvp);
- c->apr_q6_cvp = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvp_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
pr_info("%x %x\n", ptr[0], ptr[1]);
+ if (ptr[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, ptr[0], ptr[1]);
+ }
switch (ptr[0]) {
case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2:
/*response from CVP */
@@ -4206,6 +4339,24 @@
wake_up(&v->cvp_wait);
break;
case VOICE_CMD_SET_PARAM:
+ pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+ rtac_make_voice_callback(RTAC_CVP, ptr,
+ data->payload_size);
+ break;
+ case VOICE_CMD_GET_PARAM:
+ pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* VOICE_EVT_GET_PARAM_ACK */
+ if (ptr[1] != 0) {
+ pr_err("%s: CVP get param error = %d, resuming\n",
+ __func__, ptr[1]);
+ rtac_make_voice_callback(RTAC_CVP,
+ data->payload,
+ data->payload_size);
+ }
break;
default:
pr_debug("%s: not match cmd = 0x%x\n",
@@ -4213,6 +4364,15 @@
break;
}
}
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ ptr = data->payload;
+ if (ptr[0] != 0) {
+ pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+ __func__, ptr[0]);
+ }
+ rtac_make_voice_callback(RTAC_CVP, data->payload,
+ data->payload_size);
}
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 9f82694..aef463f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@
VOC_RUN,
VOC_CHANGE,
VOC_RELEASE,
+ VOC_ERROR,
};
struct mem_buffer {
@@ -884,10 +885,6 @@
#define VSS_MEDIA_ID_4GV_WB_MODEM 0x00010FC4
/*CDMA EVRC-WB vocoder modem format */
-#define VOICE_CMD_SET_PARAM 0x00011006
-#define VOICE_CMD_GET_PARAM 0x00011007
-#define VOICE_EVT_GET_PARAM_ACK 0x00011008
-
#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2 0x000112BF
struct vss_ivocproc_cmd_create_full_control_session_v2_t {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4c6a5a4..f02e5c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -297,9 +297,37 @@
{
u64 config = evsel->attr.config;
int type = evsel->attr.type;
+ char *buf;
+ size_t buf_sz;
- if (evsel->name)
+ if (evsel->name) {
+ /* Make new space for the modifier bits. */
+ buf_sz = strlen(evsel->name) + 3;
+ buf = malloc(buf_sz);
+ if (!buf)
+ /*
+ * Always return what was already in 'name'.
+ */
+ return evsel->name;
+
+ strlcpy(buf, evsel->name, buf_sz);
+
+ free(evsel->name);
+
+ evsel->name = buf;
+
+ /* User mode profiling. */
+ if (!evsel->attr.exclude_user && evsel->attr.exclude_kernel)
+ strlcpy(&evsel->name[strlen(evsel->name)], ":u",
+ buf_sz);
+ /* Kernel mode profiling. */
+ else if (!evsel->attr.exclude_kernel &&
+ evsel->attr.exclude_user)
+ strlcpy(&evsel->name[strlen(evsel->name)], ":k",
+ buf_sz);
+
return evsel->name;
+ }
return __event_name(type, config, NULL);
}