Merge "msm: adsp-loader: Add support for adsp state in adsp loader"
diff --git a/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
new file mode 100644
index 0000000..ed45192
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
@@ -0,0 +1,26 @@
+* Silicon Image-8334 MHL Tx
+
+Required properties:
+- compatible: must be "qcom,mhl-sii8334"
+- reg: i2c slave address
+- mhl-intr-gpio: MHL interrupt gpio coming out of sii8334
+- mhl-pwr-gpio: MHL power gpio required for power rails
+- mhl-rst-gpio: MHL reset gpio going into sii8334 for toggling reset pin
+- <supply-name>-supply: phandle to the regulator device tree node.
+
+Example:
+ i2c@f9967000 {
+ sii8334@72 {
+ compatible = "qcom,mhl-sii8334";
+ reg = <0x72>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <82 0x8>;
+ mhl-intr-gpio = <&msmgpio 82 0>;
+ mhl-pwr-gpio = <&msmgpio 12 0>;
+ mhl-rst-gpio = <&pm8941_mpps 8 0>;
+ avcc_18-supply = <&pm8941_l24>;
+ avcc_12-supply = <&pm8941_l2>;
+ smps3a-supply = <&pm8941_s3>;
+ vdda-supply = <&pm8941_l12>;
+ };
+ };
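
For reference, a driver would typically consume the GPIO and supply properties
above through the standard OF helpers. A minimal sketch, assuming a hypothetical
probe-time parse helper (none of these function or variable names are defined by
this binding):

    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/of_gpio.h>
    #include <linux/regulator/consumer.h>

    /* Illustrative parse of the sii8334 node at probe time. */
    static int sii8334_parse_dt(struct device *dev)
    {
            struct device_node *np = dev->of_node;
            struct regulator *avcc_18;
            int intr, pwr, rst;

            /* Each mhl-*-gpio property resolves via the referenced controller. */
            intr = of_get_named_gpio(np, "mhl-intr-gpio", 0);
            pwr = of_get_named_gpio(np, "mhl-pwr-gpio", 0);
            rst = of_get_named_gpio(np, "mhl-rst-gpio", 0);
            if (intr < 0 || pwr < 0 || rst < 0)
                    return -EINVAL;

            /* <supply-name>-supply maps to regulator_get(dev, "<supply-name>"). */
            avcc_18 = regulator_get(dev, "avcc_18");
            if (IS_ERR(avcc_18))
                    return PTR_ERR(avcc_18);

            return 0;
    }
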
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 556300d..802716c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -18,8 +18,7 @@
- qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
images and self-authentication is not desired;
<1> if the hardware requires self-authenticating images.
-- qcom,is_loadable: <0> if PIL should not load the modem image
- <1> if PIL is required to load the modem image
+- qcom,is-loadable: boolean; present if PIL is required to load the modem image.
Example:
qcom,mss@fc880000 {
@@ -34,7 +33,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..86c60e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,81 @@
+Qualcomm Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+ registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+ "bam-base" - string to identify the IPA BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+ "bam-irq" - string to identify the IPA BAM interrupt.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+- label: two labels are supported, a2-to-ipa and ipa-to-a2, which
+  supply the static configuration for the A2-IPA connection.
+- qcom,src-bam-physical-address: The physical address of the source BAM
+- qcom,ipa-bam-mem-type: The memory type:
+  0 (Pipe memory), 1 (Private memory), 2 (System memory)
+- qcom,src-bam-pipe-index: Source pipe index
+- qcom,dst-bam-physical-address: The physical address of the
+  destination BAM
+- qcom,dst-bam-pipe-index: Destination pipe index
+- qcom,data-fifo-offset: Data FIFO base offset
+- qcom,data-fifo-size: Data FIFO size (bytes)
+- qcom,descriptor-fifo-offset: Descriptor FIFO base offset
+- qcom,descriptor-fifo-size: Descriptor FIFO size (bytes)
+
+Optional properties:
+- qcom,ipa-pipe-mem: Specifies the base physical address and the
+  size of the IPA pipe memory region.
+  Pipe memory is a feature which may be supported by the
+  target (HW platform). The driver supports using pipe
+  memory instead of system memory. If this property is
+  absent from the IPA DTS entry, the driver will use
+  system memory.
+
+Example:
+
+qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+ reg = <0xfd4c0000 0x26000>,
+ <0xfd4c4000 0x14818>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 252 0>,
+ <0 253 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+};
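
The pipe sub-node properties above map directly onto u32 reads in the driver's
probe path. A hedged sketch of parsing one sub-node, assuming a hypothetical
helper name (the real IPA driver code is not part of this patch):

    #include <linux/of.h>

    /* Illustrative parse of one A2 static pipe sub-node. */
    static int ipa_parse_pipe(struct device_node *pipe)
    {
            u32 src, mem_type, fifo_sz;
            int ret;

            ret = of_property_read_u32(pipe, "qcom,src-bam-physical-address",
                                       &src);
            if (ret)
                    return ret;

            /* 0 = pipe memory, 1 = private memory, 2 = system memory */
            ret = of_property_read_u32(pipe, "qcom,ipa-bam-mem-type", &mem_type);
            if (ret)
                    return ret;

            return of_property_read_u32(pipe, "qcom,data-fifo-size", &fifo_sz);
    }
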
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index e784bfa..ae7d736 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,15 +34,20 @@
- reg : offset and length of the register set for the device.
- interrupts : should contain the uart interrupt.
-Optional properties:
-- cell-index: An integer specifying the line number of the UART device that
- represents this HSL hardware instance.
+Aliases:
+An alias may optionally be used to bind the serial device to a tty device
+(ttyHSLx) with a given line number. Aliases are of the form serial<n> where <n>
+is an integer representing the line number to use. On systems with multiple
+serial devices present, it is recommended that an alias be defined for each
+such device.
Example:
+ aliases {
+ serial0 = &uart0; // This device will be called ttyHSL0
+ };
- serial@19c400000 {
+ uart0: serial@19c400000 {
        compatible = "qcom,msm-lsuart-v14";
        reg = <0x19c40000 0x1000>;
interrupts = <195>;
- cell-index = <0>; // this device will be named ttyHSL0
};
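
With the alias scheme, the driver derives the tty line number from the
serial<n> alias via of_alias_get_id() instead of reading a cell-index property.
A minimal sketch (the helper name is illustrative):

    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* Illustrative: resolve the ttyHSL<n> line number from a serial<n> alias. */
    static int msm_serial_hsl_line(struct platform_device *pdev)
    {
            int line = of_alias_get_id(pdev->dev.of_node, "serial");

            if (line < 0)
                    line = 0; /* no alias defined: fall back to line 0 */

            return line; /* the device registers as ttyHSL<line> */
    }
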
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 7961b78..502d34a 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -272,5 +272,33 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8644.dtsi"
/include/ "mpq8092-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
index f83fe76..f0570ba 100644
--- a/arch/arm/boot/dts/msm-gdsc.dtsi
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -18,41 +18,48 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_venus";
reg = <0xfd8c1024 0x4>;
+ status = "disabled";
};
gdsc_mdss: qcom,gdsc@fd8c2304 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_mdss";
reg = <0xfd8c2304 0x4>;
+ status = "disabled";
};
gdsc_jpeg: qcom,gdsc@fd8c35a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_jpeg";
reg = <0xfd8c35a4 0x4>;
+ status = "disabled";
};
gdsc_vfe: qcom,gdsc@fd8c36a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_vfe";
reg = <0xfd8c36a4 0x4>;
+ status = "disabled";
};
gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_gx";
reg = <0xfd8c4024 0x4>;
+ status = "disabled";
};
gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_cx";
reg = <0xfd8c4034 0x4>;
+ status = "disabled";
};
gdsc_usb_hsic: qcom,gdsc@fc400404 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_usb_hsic";
reg = <0xfc400404 0x4>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 09b57a4..b900c3f 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -12,6 +12,7 @@
/include/ "skeleton.dtsi"
/include/ "msm8226-ion.dtsi"
+/include/ "msm-gdsc.dtsi"
/ {
model = "Qualcomm MSM 8226";
@@ -84,4 +85,28 @@
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm8226-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8910-rumi.dts b/arch/arm/boot/dts/msm8910-rumi.dts
new file mode 100644
index 0000000..0d944aa
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-rumi.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8910.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8910 Rumi";
+ compatible = "qcom,msm8910-rumi", "qcom,msm8910";
+ qcom,msm-id = <147 1 0>;
+
+ serial@f991f000 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8910.dtsi b/arch/arm/boot/dts/msm8910.dtsi
index 2a2e764..1c31e5d 100644
--- a/arch/arm/boot/dts/msm8910.dtsi
+++ b/arch/arm/boot/dts/msm8910.dtsi
@@ -126,6 +126,73 @@
qcom,current-limit = <800>;
};
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ qcom,pil-string = "adsp";
+ interrupts = <0 156 1>;
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-wcnss {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <6>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x20000>;
+ qcom,pil-string = "wcnss";
+ interrupts = <0 142 1>;
+ };
+
+ qcom,smsm-wcnss {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <6>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x80000>;
+ interrupts = <0 144 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
};
/include/ "msm8910-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 15fb799..1857121 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -109,6 +109,22 @@
};
};
+ i2c@f9967000 {
+ sii8334@72 {
+ compatible = "qcom,mhl-sii8334";
+ reg = <0x72>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <82 0x8>;
+ mhl-intr-gpio = <&msmgpio 82 0>;
+ mhl-pwr-gpio = <&msmgpio 12 0>;
+ mhl-rst-gpio = <&pm8941_mpps 8 0>;
+ avcc_18-supply = <&pm8941_l24>;
+ avcc_12-supply = <&pm8941_l2>;
+ smps3a-supply = <&pm8941_s3>;
+ vdda-supply = <&pm8941_l12>;
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -256,6 +272,13 @@
};
gpio@cb00 { /* GPIO 12 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-select = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@cc00 { /* GPIO 13 */
@@ -384,6 +407,12 @@
};
mpp@a700 { /* MPP 8 */
+ qcom,mode = <1>; /* DIG_OUT */
+ qcom,output-type = <0>; /* CMOS */
+ qcom,pull-up = <0>;
+ qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
+ qcom,src-select = <0>; /* CONSTANT */
+ qcom,master-en = <1>; /* ENABLE MPP */
};
};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index f391621..96889aa 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -370,9 +370,25 @@
};
gpio@e000 { /* GPIO 33 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,invert = <1>;
+ qcom,master-en = <1>;
};
gpio@e100 { /* GPIO 34 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-sel = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,invert = <0>;
+ qcom,master-en = <1>;
};
gpio@e200 { /* GPIO 35 */
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 495d3fb..3f7e9de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -71,7 +71,6 @@
rpm-regulator-smpb2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8841_s2: regulator-s2 {
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1050000>;
@@ -131,7 +130,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_s2: regulator-s2 {
regulator-min-microvolt = <2150000>;
regulator-max-microvolt = <2150000>;
@@ -284,7 +282,6 @@
rpm-regulator-ldoa12 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_l12: regulator-l12 {
parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index aaf8da4..1bad657 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -156,6 +156,19 @@
qcom,bus-width = <8>;
qcom,nonremovable;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ qcom,msm-bus,name = "sdcc1";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
+ <78 512 6656 13312>, /* 13 MB/s */
+ <78 512 13312 26624>, /* 26 MB/s */
+ <78 512 26624 53248>, /* 52 MB/s */
+ <78 512 53248 106496>, /* 104 MB/s */
+ <78 512 106496 212992>, /* 208 MB/s */
+ <78 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <42>;
};
@@ -190,6 +203,19 @@
qcom,xpc;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
qcom,current-limit = <800>;
+
+ qcom,msm-bus,name = "sdcc2";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 6656 13312>, /* 13 MB/s */
+ <81 512 13312 26624>, /* 26 MB/s */
+ <81 512 26624 53248>, /* 52 MB/s */
+ <81 512 53248 106496>, /* 104 MB/s */
+ <81 512 106496 212992>, /* 208 MB/s */
+ <81 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <44>;
};
@@ -222,6 +248,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc3";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
+ <79 512 6656 13312>, /* 13 MB/s */
+ <79 512 13312 26624>, /* 26 MB/s */
+ <79 512 26624 53248>, /* 52 MB/s */
+ <79 512 53248 106496>, /* 104 MB/s */
+ <79 512 106496 212992>, /* 208 MB/s */
+ <79 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
@@ -254,6 +293,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc4";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
+ <80 512 6656 13312>, /* 13 MB/s */
+ <80 512 13312 26624>, /* 26 MB/s */
+ <80 512 26624 53248>, /* 52 MB/s */
+ <80 512 53248 106496>, /* 104 MB/s */
+ <80 512 106496 212992>, /* 208 MB/s */
+ <80 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
@@ -934,7 +986,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
@@ -1285,6 +1337,34 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8x41-rpm-regulator.dtsi"
/include/ "msm-pm8841.dtsi"
/include/ "msm-pm8941.dtsi"
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index b128648..24f616d 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -23,7 +23,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8019_s2: regulator-s2 {
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <1250000>;
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index ab0e647..09c388f 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -441,6 +441,11 @@
qcom,msm-dai-q6 {
compatible = "qcom,msm-dai-q6";
};
+
+ qcom,mss {
+ compatible = "qcom,pil-q6v5-mss";
+ interrupts = <0 24 1>;
+ };
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index a613932..d5e15f1 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -414,7 +414,6 @@
CONFIG_USB_EHCI_MSM_HOST4=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 0d63836..386f311 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -417,7 +417,6 @@
CONFIG_USB_EHCI_MSM_HOST4=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 0070e22..6517945 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -37,7 +37,6 @@
CONFIG_EFI_PARTITION=y
CONFIG_ARCH_MSM=y
CONFIG_ARCH_MSM8974=y
-CONFIG_ARCH_MSM8226=y
CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
# CONFIG_MSM_STACKED_MEMORY is not set
CONFIG_CPU_HAS_L2_PMU=y
@@ -75,6 +74,7 @@
CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index b9add04..97fef39 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -42,6 +42,9 @@
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_MSM_DLOAD_MODE=y
@@ -125,6 +128,7 @@
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
+CONFIG_MSM_BUS_SCALING=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
@@ -251,3 +255,9 @@
CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_MSM_ADSP_LOADER=m
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index b9d0527..10c4d6c 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -977,7 +977,11 @@
/* Fall through. */
case CPU_UP_CANCELED:
acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
+
+ regulator_disable(sc->vreg[VREG_CORE].reg);
regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
+ regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
+ sc->vreg[VREG_CORE].max_vdd);
break;
case CPU_UP_PREPARE:
if (!sc->initialized) {
@@ -988,10 +992,20 @@
}
if (WARN_ON(!prev_khz[cpu]))
return NOTIFY_BAD;
+
+ rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ sc->vreg[VREG_CORE].cur_vdd,
+ sc->vreg[VREG_CORE].max_vdd);
+ if (rc < 0)
+ return NOTIFY_BAD;
rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
sc->vreg[VREG_CORE].cur_ua);
if (rc < 0)
return NOTIFY_BAD;
+ rc = regulator_enable(sc->vreg[VREG_CORE].reg);
+ if (rc < 0)
+ return NOTIFY_BAD;
+
acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
break;
default:
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 851f7d9..a66495d 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -455,7 +455,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index 947697a..eaebea0 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -449,7 +449,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index 3ee052b..9a2967a 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -487,7 +487,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index f9e2c8e..397411d 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -382,7 +382,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 35b68b1..39060ad 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -925,6 +925,9 @@
&msm_cpudai_sec_auxpcm_rx,
&msm_cpudai_sec_auxpcm_tx,
&msm_cpudai_stub,
+ &msm_cpudai_incall_music_rx,
+ &msm_cpudai_incall_record_rx,
+ &msm_cpudai_incall_record_tx,
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 33ec10a..b284168 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -472,7 +472,6 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
- .parent = &cxo_clk_src.c,
.dbg_name = "apcspll_clk_src",
.ops = &clk_ops_local_pll,
CLK_INIT(apcspll_clk_src.c),
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 3888a4e..e55e9a7 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -562,6 +562,21 @@
.id = -1,
};
+struct platform_device msm_cpudai_incall_music_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8005,
+};
+
+struct platform_device msm_cpudai_incall_record_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8004,
+};
+
+struct platform_device msm_cpudai_incall_record_tx = {
+ .name = "msm-dai-q6",
+ .id = 0x8003,
+};
+
struct platform_device msm_i2s_cpudai0 = {
.name = "msm-dai-q6",
.id = PRIMARY_I2S_RX,
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
new file mode 100644
index 0000000..0f689ac
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -0,0 +1,458 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/sps.h>
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+ IPA_BYPASS_NAT,
+ IPA_SRC_NAT,
+ IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ * @DMA: DMA mode, packets are routed to the pipe set in ipa_ep_cfg_mode.dst
+ */
+enum ipa_mode_type {
+ IPA_BASIC,
+ IPA_ENABLE_FRAMING_HDLC,
+ IPA_ENABLE_DEFRAMING_HDLC,
+ IPA_DMA,
+};
+
+/**
+ * enum ipa_aggr_en_type - aggregation setting type in IPA
+ * end-point
+ */
+enum ipa_aggr_en_type {
+ IPA_BYPASS_AGGR,
+ IPA_ENABLE_AGGR,
+ IPA_ENABLE_DEAGGR,
+};
+
+/**
+ * enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+ IPA_MBIM_16,
+ IPA_MBIM_32,
+ IPA_TLP,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+ IPA_MBIM,
+ IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+ IPA_RECEIVE,
+ IPA_WRITE_DONE,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en: This defines the default NAT mode for the pipe: in case of
+ * filter miss - the default NAT mode defines the NATing operation
+ * on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+ enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ * @hdr_len: Header length in bytes to be added/removed. Assuming header len
+ * is constant per endpoint. Valid for both Input and Output Pipes
+ * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no
+ * metadata within header.
+ * 1: Metadata_Ofst value is valid, i.e., metadata
+ * within header is in offset Metadata_Ofst Valid
+ * for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_ofst_metadata: Offset within header in which metadata resides
+ * Size of metadata - 4bytes
+ * Example - Stream ID/SSID/mux ID.
+ * Valid for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_additional_const_len: Defines the constant length that should be added
+ * to the payload length in order for IPA to update
+ * correctly the length field within the header
+ * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no
+ * length field within the inserted header
+ * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a
+ * packet length field resides within the header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size: Offset within header in which the packet size resides.
+ * Upon Header Insertion, IPA will update this field within
+ * the header with the packet length. The assumption is that
+ * the header length field size is constant and is 2 bytes.
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet.
+ * This bit is valid only when Hdr_En=01(Header Insertion)
+ * SW should set this bit for IPA-to-A5 pipes.
+ * 0: Do not insert A5 Mux Header
+ * 1: Insert A5 Mux Header
+ * Valid for Output Pipes (IPA Producer)
+ */
+struct ipa_ep_cfg_hdr {
+ u32 hdr_len;
+ u32 hdr_ofst_metadata_valid;
+ u32 hdr_ofst_metadata;
+ u32 hdr_additional_const_len;
+ u32 hdr_ofst_pkt_size_valid;
+ u32 hdr_ofst_pkt_size;
+ u32 hdr_a5_mux;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode: Valid for Input Pipes only (IPA Consumer)
+ * @dst: This parameter specifies the output pipe to which the packets
+ * will be routed.
+ * This parameter is valid for Mode=DMA and not valid for
+ * Mode=Basic
+ * Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+ enum ipa_mode_type mode;
+ enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ * @aggr_en: Valid for both Input and Output Pipes
+ * @aggr: Valid for both Input and Output Pipes
+ * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB). When set
+ * to 0, there is no size limitation on the aggregation.
+ * When both Aggr_Byte_Limit and Aggr_Time_Limit are set
+ * to 0, there is no aggregation; every packet is sent
+ * independently according to the aggregation structure.
+ * Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit: Timer to close aggregated packet (<=32ms). When set to 0,
+ * there is no time limitation on the aggregation. When
+ * both Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ * there is no aggregation; every packet is sent
+ * independently according to the aggregation structure.
+ * Valid for Output Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_aggr {
+ enum ipa_aggr_en_type aggr_en;
+ enum ipa_aggr_type aggr;
+ u32 aggr_byte_limit;
+ u32 aggr_time_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl: Defines the default routing table index to be used when no
+ * filter rule matches; valid for Input Pipes only (IPA
+ * Consumer). Clients should set this to 0, which will cause default
+ * v4 and v6 routes setup internally by IPA driver to be used for
+ * this end-point
+ */
+struct ipa_ep_cfg_route {
+ u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat: NAT parameters
+ * @hdr: Header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @route: Routing parameters
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_route route;
+};
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * client allocates the data and desc FIFO and specifies that in data+desc OR
+ * specifies sizes and pipe_mem pref and IPA does the allocation.
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @client_bam_hdl: client SPS handle
+ * @client_ep_idx: client PER EP index
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ * @desc_fifo_sz: size of desc FIFO
+ * @data_fifo_sz: size of data FIFO
+ * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback
+ * to sys mem if pipe mem alloc fails
+ * @desc: desc FIFO meta-data when client has allocated it
+ * @data: data FIFO meta-data when client has allocated it
+ */
+struct ipa_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 client_bam_hdl;
+ u32 client_ep_idx;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ u32 desc_fifo_sz;
+ u32 data_fifo_sz;
+ bool pipe_mem_preferred;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_sps_params - SPS related output parameters resulting from
+ * low/high level client connect
+ * @ipa_bam_hdl: IPA SPS handle
+ * @ipa_ep_idx: IPA PER EP index
+ * @desc: desc FIFO meta-data
+ * @data: data FIFO meta-data
+ */
+struct ipa_sps_params {
+ u32 ipa_bam_hdl;
+ u32 ipa_ep_idx;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props: number of tx properties
+ * @prop: the tx properties array
+ */
+struct ipa_tx_intf {
+ u32 num_props;
+ struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props: number of rx properties
+ * @prop: the rx properties array
+ */
+struct ipa_rx_intf {
+ u32 num_props;
+ struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ */
+struct ipa_sys_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 desc_fifo_sz;
+ void *priv;
+ void (*notify)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+};
+
+/**
+ * struct ipa_msg_meta_wrapper - message meta-data wrapper
+ * @meta: the meta-data itself
+ * @link: opaque to client
+ * @meta_wrapper_free: function to free the metadata wrapper when IPA driver
+ * is done with it
+ */
+struct ipa_msg_meta_wrapper {
+ struct ipa_msg_meta meta;
+ struct list_head link;
+ void (*meta_wrapper_free)(struct ipa_msg_meta_wrapper *buff);
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @mbim_stream_id: the stream ID used in NDP signature
+ * @mbim_stream_id_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+ u8 mbim_stream_id;
+ bool mbim_stream_id_valid;
+};
+
+/**
+ * struct ipa_msg_wrapper - message wrapper
+ * @msg: the message buffer itself, MUST exist after call returns, will
+ * be freed by IPA driver when it is done with it
+ * @link: opaque to client
+ * @msg_free: function to free the message when IPA driver is done with it
+ * @msg_wrapper_free: function to free the message wrapper when IPA driver is
+ * done with it
+ */
+struct ipa_msg_wrapper {
+ void *msg;
+ struct list_head link;
+ void (*msg_free)(void *msg);
+ void (*msg_wrapper_free)(struct ipa_msg_wrapper *buff);
+};
+
+/**
+ * typedef ipa_pull_fn - callback function
+ * @buf - [in] the buffer to populate the message into
+ * @sz - [in] the size of the buffer
+ *
+ * callback function registered by a kernel client with the IPA driver so that
+ * the IPA driver can pull messages from the kernel client asynchronously.
+ *
+ * Returns how many bytes were copied into the buffer, negative on failure.
+ */
+typedef int (*ipa_pull_fn)(void *buf, uint16_t sz);
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * rmnet bridge
+ */
+int rmnet_bridge_init(void);
+
+int rmnet_bridge_disconnect(void);
+
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+#endif /* _IPA_H_ */
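
A hedged usage sketch of the connect path declared in this header, showing the
notify() contract for data-path events. The client-side names are hypothetical,
and the ipa_client_type value is left as a placeholder since that enum lives in
<linux/msm_ipa.h>, outside this patch:

    #include <linux/netdevice.h>
    #include <mach/ipa.h>

    /* Illustrative data-path callback; for both events, data is a sk_buff. */
    static void client_notify(void *priv, enum ipa_dp_evt_type evt,
                              unsigned long data)
    {
            struct sk_buff *skb = (struct sk_buff *)data;

            if (evt == IPA_RECEIVE)
                    netif_rx(skb);          /* hand the rx packet up the stack */
            else if (evt == IPA_WRITE_DONE)
                    dev_kfree_skb_any(skb); /* tx buffer is ours again */
    }

    static int client_connect(u32 bam_hdl, u32 ep_idx, u32 *hdl)
    {
            struct ipa_connect_params in = {
                    .client = 0, /* placeholder for an enum ipa_client_type value */
                    .client_bam_hdl = bam_hdl,
                    .client_ep_idx = ep_idx,
                    .desc_fifo_sz = 0x300,
                    .data_fifo_sz = 0xd00,
                    .pipe_mem_preferred = true, /* fall back to sys mem on failure */
                    .notify = client_notify,
            };
            struct ipa_sps_params sps;

            return ipa_connect(&in, &sps, hdl);
    }
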
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 34bdc79..0499a7a 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -111,6 +111,7 @@
MSM_CPU_8092,
MSM_CPU_8226,
MSM_CPU_8910,
+ MSM_CPU_8625Q,
};
enum pmic_model {
@@ -447,6 +448,18 @@
#endif
}
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
static inline int soc_class_is_msm8960(void)
{
return cpu_is_msm8960() || cpu_is_msm8960ab();
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index e0ab983..ea17efe 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -485,7 +485,7 @@
};
#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000220)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
enum bimc_m_priolvl_override {
M_PRIOLVL_OVERRIDE_RMSK = 0x301,
M_PRIOLVL_OVERRIDE_BMSK = 0x300,
@@ -495,10 +495,10 @@
};
#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
enum bimc_m_read_command_override {
- M_RD_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x300000,
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
@@ -529,13 +529,15 @@
};
#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
enum bimc_m_write_command_override {
- M_WR_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x30000,
- M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x10,
- M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x7000,
- M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
@@ -544,8 +546,10 @@
M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x20,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
@@ -1454,7 +1458,7 @@
* boundary in future
*/
wmb();
- set_qos_mode(binfo->base, mas_index, 1, 1, 1);
+ set_qos_mode(binfo->base, mas_index, 0, 1, 1);
break;
case BIMC_QOS_MODE_BYPASS:
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
index f0f5cd8..cfd84eb 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -1049,9 +1049,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
- .prio_lvl = 0,
- .prio_rd = 2,
- .prio_wr = 2,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_AMPSS_M1,
@@ -1064,6 +1063,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_MSS_PROC,
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 6ae7544..6e8d127 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -303,6 +303,7 @@
return IRQ_HANDLED;
}
+ disable_irq_nosync(drv->irq);
drv->restart_inprogress = true;
restart_wcnss(drv);
@@ -324,7 +325,6 @@
pil_shutdown(&drv->desc);
flush_delayed_work(&drv->cancel_vote_work);
wcnss_flush_delayed_boot_votes();
- disable_irq_nosync(drv->irq);
return 0;
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 7652d74..5bed8b4 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -729,8 +729,8 @@
return -ENOMEM;
platform_set_drvdata(pdev, drv);
- of_property_read_u32(pdev->dev.of_node, "qcom,is_loadable",
- &drv->is_loadable);
+ drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,is-loadable");
if (drv->is_loadable) {
ret = pil_mss_loadable_init(drv, pdev);
if (ret)
diff --git a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
index 2d0607c..409d796 100644
--- a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
+++ b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
@@ -24,6 +24,7 @@
#include <mach/qdsp6v2/rtac.h>
#include "q6audio_common.h"
#include <sound/q6afe-v2.h>
+#include <sound/apr_audio-v2.h>
#ifndef CONFIG_RTAC
@@ -45,10 +46,6 @@
#else
-#define VOICE_CMD_SET_PARAM 0x00011006
-#define VOICE_CMD_GET_PARAM 0x00011007
-#define VOICE_EVT_GET_PARAM_ACK 0x00011008
-
/* Max size of payload (buf size - apr header) */
#define MAX_PAYLOAD_SIZE 4076
#define RTAC_MAX_ACTIVE_DEVICES 4
@@ -353,7 +350,7 @@
return;
}
-static int get_voice_index(u32 cvs_handle)
+static int get_voice_index_cvs(u32 cvs_handle)
{
u32 i;
@@ -367,6 +364,32 @@
return 0;
}
+static int get_voice_index_cvp(u32 cvp_handle)
+{
+ u32 i;
+
+ for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+ if (rtac_voice_data.voice[i].cvp_handle == cvp_handle)
+ return i;
+ }
+
+ pr_err("%s: No voice index for CVP handle %d found, returning 0\n",
+ __func__, cvp_handle);
+ return 0;
+}
+
+static int get_voice_index(u32 mode, u32 handle)
+{
+ if (mode == RTAC_CVP)
+ return get_voice_index_cvp(handle);
+ if (mode == RTAC_CVS)
+ return get_voice_index_cvs(handle);
+
+ pr_err("%s: Invalid mode %d, returning 0\n",
+ __func__, mode);
+ return 0;
+}
+
/* ADM APR */
void rtac_set_adm_handle(void *handle)
@@ -402,6 +425,7 @@
if (payload_size > rtac_adm_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_adm_user_buf_size, payload_size);
+ rtac_adm_payload_size = 0;
goto done;
}
memcpy(rtac_adm_buffer + sizeof(u32), payload, payload_size);
@@ -470,6 +494,7 @@
/* Set globals for copy of returned payload */
rtac_adm_user_buf_size = count;
+
/* Copy buffer to in-band payload */
if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
buf + 3 * sizeof(u32), payload_size)) {
@@ -572,6 +597,7 @@
if (payload_size > rtac_asm_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_asm_user_buf_size, payload_size);
+ rtac_asm_payload_size = 0;
goto done;
}
memcpy(rtac_asm_buffer + sizeof(u32), payload, payload_size);
@@ -619,6 +645,7 @@
__func__);
goto done;
}
+
if (session_id > (SESSION_MAX + 1)) {
pr_err("%s: Invalid Session = %d\n", __func__, session_id);
goto done;
@@ -739,6 +766,7 @@
if (payload_size > rtac_voice_user_buf_size) {
pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
__func__, rtac_voice_user_buf_size, payload_size);
+ rtac_voice_payload_size = 0;
goto done;
}
memcpy(rtac_voice_buffer + sizeof(u32), payload, payload_size);
@@ -753,7 +781,7 @@
u32 count = 0;
u32 bytes_returned = 0;
u32 payload_size;
- u16 dest_port;
+ u32 dest_port;
struct apr_hdr voice_params;
pr_debug("%s\n", __func__);
@@ -818,10 +846,10 @@
voice_params.src_svc = 0;
voice_params.src_domain = APR_DOMAIN_APPS;
voice_params.src_port = voice_session_id[
- get_voice_index(dest_port)];
+ get_voice_index(mode, dest_port)];
voice_params.dest_svc = 0;
voice_params.dest_domain = APR_DOMAIN_MODEM;
- voice_params.dest_port = dest_port;
+ voice_params.dest_port = (u16)dest_port;
voice_params.token = 0;
voice_params.opcode = opcode;
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index d1c61fe..bb33283 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -659,19 +659,6 @@
return uV;
}
-static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct rpm_regulator *reg = rdev_get_drvdata(rdev);
- int uV = 0;
-
- if (selector == 0)
- uV = reg->min_uV;
- else if (selector == 1)
- uV = reg->max_uV;
-
- return uV;
-}
-
static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
@@ -1030,7 +1017,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1043,7 +1029,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1056,7 +1041,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1069,7 +1053,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1089,7 +1072,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.enable_time = rpm_vreg_enable_time,
};
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 764fbeb..b6fb88c 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -152,6 +152,8 @@
LIST_HEAD(msm_rpm_ack_list);
+static DECLARE_COMPLETION(data_ready);
+
static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
struct msm_rpm_kvp_data *kvp)
{
@@ -340,7 +342,7 @@
switch (event) {
case SMD_EVENT_DATA:
- queue_work(msm_rpm_smd_wq, &pdata->work);
+ complete(&data_ready);
break;
case SMD_EVENT_OPEN:
complete(&pdata->smd_open);
@@ -530,17 +532,19 @@
int errno;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
- if (!spin_trylock(&msm_rpm_data.smd_lock_read))
- return;
- while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
- if (msm_rpm_read_smd_data(buf)) {
- break;
+ while (1) {
+ wait_for_completion(&data_ready);
+
+ spin_lock(&msm_rpm_data.smd_lock_read);
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ if (msm_rpm_read_smd_data(buf))
+ break;
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
}
- msg_id = msm_rpm_get_msg_id_from_ack(buf);
- errno = msm_rpm_get_error_from_ack(buf);
- msm_rpm_process_ack(msg_id, errno);
+ spin_unlock(&msm_rpm_data.smd_lock_read);
}
- spin_unlock(&msm_rpm_data.smd_lock_read);
}
#define DEBUG_PRINT_BUFFER_SIZE 512
@@ -892,6 +896,9 @@
msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+ complete(&data_ready);
return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
@@ -1013,6 +1020,7 @@
msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
if (!msm_rpm_smd_wq)
return -EINVAL;
+ queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
}
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
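
The rpm-smd change above replaces per-event queue_work() with a single work
item, queued once at probe, that blocks on a completion which the SMD callback
signals. Reduced to its essentials (names are illustrative, not the driver's):

    #include <linux/completion.h>
    #include <linux/workqueue.h>

    static DECLARE_COMPLETION(data_ready);

    /* SMD notify callback (atomic context): just signal, don't re-queue. */
    static void smd_event_data(void)
    {
            complete(&data_ready);
    }

    /* Queued once on a dedicated workqueue; loops for the driver's lifetime. */
    static void rpm_smd_work(struct work_struct *work)
    {
            while (1) {
                    wait_for_completion(&data_ready);
                    /* take smd_lock_read and drain every available packet */
            }
    }
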
diff --git a/arch/arm/mach-msm/saw-regulator.c b/arch/arm/mach-msm/saw-regulator.c
index 6762648..0a81a33 100644
--- a/arch/arm/mach-msm/saw-regulator.c
+++ b/arch/arm/mach-msm/saw-regulator.c
@@ -54,11 +54,17 @@
struct regulator_dev *rdev;
char *name;
int uV;
+ int last_set_uV;
+ unsigned vlevel;
+ bool online;
};
/* Minimum core operating voltage */
#define MIN_CORE_VOLTAGE 950000
+/* Specifies an uninitialized voltage */
+#define INVALID_VOLTAGE -1
+
/* Specifies the PMIC internal slew rate in uV/us. */
#define REGULATOR_SLEW_RATE 1250
@@ -69,12 +75,32 @@
return vreg->uV;
}
+static int _set_voltage(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = msm_spm_set_vdd(rdev_get_id(rdev), vreg->vlevel);
+ if (!rc) {
+ if (vreg->uV > vreg->last_set_uV) {
+ /* Wait for voltage to stabalize. */
+ /* Wait for voltage to stabilize. */
+ REGULATOR_SLEW_RATE);
+ }
+ vreg->last_set_uV = vreg->uV;
+ } else {
+ pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
+ vreg->uV = vreg->last_set_uV;
+ }
+
+ return rc;
+}
+
static int saw_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
unsigned *selector)
{
struct saw_vreg *vreg = rdev_get_drvdata(rdev);
int uV = min_uV;
- int rc;
u8 vprog, band;
if (uV < FTSMPS_BAND1_UV_MIN && max_uV >= FTSMPS_BAND1_UV_MIN)
@@ -119,23 +145,51 @@
return -EINVAL;
}
- rc = msm_spm_set_vdd(rdev_get_id(rdev), band | vprog);
- if (!rc) {
- if (uV > vreg->uV) {
- /* Wait for voltage to stabalize. */
- udelay((uV - vreg->uV) / REGULATOR_SLEW_RATE);
- }
- vreg->uV = uV;
- } else {
- pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
- }
+ vreg->vlevel = band | vprog;
+ vreg->uV = uV;
+
+ if (!vreg->online)
+ return 0;
+
+ return _set_voltage(rdev);
+}
+
+static int saw_enable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ if (vreg->uV != vreg->last_set_uV)
+ rc = _set_voltage(rdev);
+
+ if (!rc)
+ vreg->online = true;
return rc;
}
+static int saw_disable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ vreg->online = false;
+
+ return 0;
+}
+
+static int saw_is_enabled(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->online;
+}
+
static struct regulator_ops saw_ops = {
.get_voltage = saw_get_voltage,
.set_voltage = saw_set_voltage,
+ .enable = saw_enable,
+ .disable = saw_disable,
+ .is_enabled = saw_is_enabled,
};
static int __devinit saw_probe(struct platform_device *pdev)
@@ -168,12 +222,13 @@
goto free_vreg;
}
- vreg->desc.name = vreg->name;
- vreg->desc.id = pdev->id;
- vreg->desc.ops = &saw_ops;
- vreg->desc.type = REGULATOR_VOLTAGE;
- vreg->desc.owner = THIS_MODULE;
- vreg->uV = MIN_CORE_VOLTAGE;
+ vreg->desc.name = vreg->name;
+ vreg->desc.id = pdev->id;
+ vreg->desc.ops = &saw_ops;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->uV = INVALID_VOLTAGE;
+ vreg->last_set_uV = MIN_CORE_VOLTAGE;
vreg->rdev = regulator_register(&vreg->desc, &pdev->dev,
init_data, vreg, NULL);
@@ -233,5 +288,4 @@
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAW regulator driver");
-MODULE_VERSION("1.0");
MODULE_ALIAS("platform:saw-regulator");
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 6013efc..f4dae89 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -204,10 +204,13 @@
return ret;
}
-static u32 cacheline_size;
-
static void scm_inv_range(unsigned long start, unsigned long end)
{
+ u32 cacheline_size, ctr;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
start = round_down(start, cacheline_size);
end = round_up(end, cacheline_size);
outer_inv_range(start, end);
@@ -444,13 +447,3 @@
}
EXPORT_SYMBOL(scm_get_feat_version);
-static int scm_init(void)
-{
- u32 ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- return 0;
-}
-early_initcall(scm_init);
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 6cb9339..2743547 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -303,7 +303,13 @@
[154] = MSM_CPU_8930AB,
[155] = MSM_CPU_8930AB,
[156] = MSM_CPU_8930AB,
- [157] = MSM_CPU_8930AB
+ [157] = MSM_CPU_8930AB,
+
+ /* 8625Q IDs */
+ [168] = MSM_CPU_8625Q,
+ [169] = MSM_CPU_8625Q,
+ [170] = MSM_CPU_8625Q,
+
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
diff --git a/block/test-iosched.c b/block/test-iosched.c
index 52070ac..71e8669 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -663,7 +663,7 @@
test_name = ptd->test_info.get_test_case_str_fn(ptd);
else
test_name = "Unknown testcase";
- test_pr_info("%s: Starting test %s\n", __func__, test_name);
+ test_pr_info("%s: Starting test %s", __func__, test_name);
ret = prepare_test(ptd);
if (ret) {
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 49543a4..b2a7f71 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -1645,6 +1645,7 @@
static const struct v4l2_ioctl_ops msm_ioctl_ops_server = {
.vidioc_subscribe_event = msm_server_v4l2_subscribe_event,
+ .vidioc_unsubscribe_event = msm_server_v4l2_unsubscribe_event,
.vidioc_default = msm_ioctl_server,
};
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 5e39d25..387c254 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -17,7 +17,7 @@
#include <linux/proc_fs.h>
#include <linux/ioctl.h>
#include <mach/camera.h>
-#include "msm.h"
+#include "../msm.h"
uint32_t msm_cam_server_get_mctl_handle(void);
struct iommu_domain *msm_cam_server_get_domain(void);
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 82f9e58..5161b7b 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -396,6 +396,7 @@
if (rc == 0 && atomic_read(&dev->vp_enabled) == 1) {
/* This should not happen, if it does hw is stuck */
disable_irq_nosync(dev->vpirq->start);
+ atomic_set(&dev->vp_enabled, 0);
pr_err("%s: VP Timeout and VP still running\n",
__func__);
}
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index c5551b8..35bb4ac 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -554,105 +554,105 @@
return NULL;
}
- switch (td->test_info.testcase) {
+	switch (td->test_info.testcase) {
case TEST_STOP_DUE_TO_FLUSH:
- return " stop due to flush";
+ return "\"stop due to flush\"";
case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
- return " stop due to flush after max-1 reqs";
+ return "\"stop due to flush after max-1 reqs\"";
case TEST_STOP_DUE_TO_READ:
- return " stop due to read";
+ return "\"stop due to read\"";
case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
- return "Test stop due to read after max-1 reqs";
+ return "\"stop due to read after max-1 reqs\"";
case TEST_STOP_DUE_TO_EMPTY_QUEUE:
- return "Test stop due to empty queue";
+ return "\"stop due to empty queue\"";
case TEST_STOP_DUE_TO_MAX_REQ_NUM:
- return "Test stop due to max req num";
+ return "\"stop due to max req num\"";
case TEST_STOP_DUE_TO_THRESHOLD:
- return "Test stop due to exceeding threshold";
+ return "\"stop due to exceeding threshold\"";
case TEST_RET_ABORT:
- return "Test err_check return abort";
+ return "\"err_check return abort\"";
case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
- return "Test err_check return partial followed by success";
+ return "\"err_check return partial followed by success\"";
case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
- return "Test err_check return partial followed by abort";
+ return "\"err_check return partial followed by abort\"";
case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
- return "Test err_check return partial multiple until success";
+ return "\"err_check return partial multiple until success\"";
case TEST_RET_PARTIAL_MAX_FAIL_IDX:
- return "Test err_check return partial max fail index";
+ return "\"err_check return partial max fail index\"";
case TEST_RET_RETRY:
- return "Test err_check return retry";
+ return "\"err_check return retry\"";
case TEST_RET_CMD_ERR:
- return "Test err_check return cmd error";
+ return "\"err_check return cmd error\"";
case TEST_RET_DATA_ERR:
- return "Test err_check return data error";
+ return "\"err_check return data error\"";
case TEST_HDR_INVALID_VERSION:
- return "Test invalid - wrong header version";
+ return "\"invalid - wrong header version\"";
case TEST_HDR_WRONG_WRITE_CODE:
- return "Test invalid - wrong write code";
+ return "\"invalid - wrong write code\"";
case TEST_HDR_INVALID_RW_CODE:
- return "Test invalid - wrong R/W code";
+ return "\"invalid - wrong R/W code\"";
case TEST_HDR_DIFFERENT_ADDRESSES:
- return "Test invalid - header different addresses";
+ return "\"invalid - header different addresses\"";
case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
- return "Test invalid - header req num smaller than actual";
+ return "\"invalid - header req num smaller than actual\"";
case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
- return "Test invalid - header req num larger than actual";
+ return "\"invalid - header req num larger than actual\"";
case TEST_HDR_CMD23_PACKED_BIT_SET:
- return "Test invalid - header cmd23 packed bit set";
+ return "\"invalid - header cmd23 packed bit set\"";
case TEST_CMD23_MAX_PACKED_WRITES:
- return "Test invalid - cmd23 max packed writes";
+ return "\"invalid - cmd23 max packed writes\"";
case TEST_CMD23_ZERO_PACKED_WRITES:
- return "Test invalid - cmd23 zero packed writes";
+ return "\"invalid - cmd23 zero packed writes\"";
case TEST_CMD23_PACKED_BIT_UNSET:
- return "Test invalid - cmd23 packed bit unset";
+ return "\"invalid - cmd23 packed bit unset\"";
case TEST_CMD23_REL_WR_BIT_SET:
- return "Test invalid - cmd23 rel wr bit set";
+ return "\"invalid - cmd23 rel wr bit set\"";
case TEST_CMD23_BITS_16TO29_SET:
- return "Test invalid - cmd23 bits [16-29] set";
+ return "\"invalid - cmd23 bits [16-29] set\"";
case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
- return "Test invalid - cmd23 header block not in count";
+ return "\"invalid - cmd23 header block not in count\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER:
- return "\nTest packing control - pack n";
+ return "\"packing control - pack n\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
- return "\nTest packing control - pack n followed by read";
+ return "\"packing control - pack n followed by read\"";
case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
- return "\nTest packing control - pack n followed by flush";
+ return "\"packing control - pack n followed by flush\"";
case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
- return "\nTest packing control - pack one followed by read";
+ return "\"packing control - pack one followed by read\"";
case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
- return "\nTest packing control - pack threshold";
+ return "\"packing control - pack threshold\"";
case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
- return "\nTest packing control - no packing";
+ return "\"packing control - no packing\"";
case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
- return "\nTest packing control - no packing, trigger requests";
+ return "\"packing control - no packing, trigger requests\"";
case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
- return "\nTest packing control - no pack, trigger-read-trigger";
+ return "\"packing control - no pack, trigger-read-trigger\"";
case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
- return "\nTest packing control- no pack, trigger-flush-trigger";
+ return "\"packing control- no pack, trigger-flush-trigger\"";
case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
- return "\nTest packing control - mix: pack -> no pack -> pack";
+ return "\"packing control - mix: pack -> no pack -> pack\"";
case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
- return "\nTest packing control - mix: no pack->pack->no pack";
+ return "\"packing control - mix: no pack->pack->no pack\"";
case TEST_WRITE_DISCARD_SANITIZE_READ:
- return "\nTest write, discard, sanitize";
+ return "\"write, discard, sanitize\"";
case BKOPS_DELAYED_WORK_LEVEL_1:
- return "\nTest delayed work BKOPS level 1";
+ return "\"delayed work BKOPS level 1\"";
case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
- return "\nTest delayed work BKOPS level 1 with HPI";
+ return "\"delayed work BKOPS level 1 with HPI\"";
case BKOPS_CANCEL_DELAYED_WORK:
- return "\nTest cancel delayed BKOPS work";
+ return "\"cancel delayed BKOPS work\"";
case BKOPS_URGENT_LEVEL_2:
- return "\nTest urgent BKOPS level 2";
+ return "\"urgent BKOPS level 2\"";
case BKOPS_URGENT_LEVEL_2_TWO_REQS:
- return "\nTest urgent BKOPS level 2, followed by a request";
+ return "\"urgent BKOPS level 2, followed by a request\"";
case BKOPS_URGENT_LEVEL_3:
- return "\nTest urgent BKOPS level 3";
+ return "\"urgent BKOPS level 3\"";
case TEST_LONG_SEQUENTIAL_READ:
- return "Test long sequential read";
+ return "\"long sequential read\"";
case TEST_LONG_SEQUENTIAL_WRITE:
- return "Test long sequential write";
+ return "\"long sequential write\"";
default:
- return "Unknown testcase";
+ return " Unknown testcase";
}
return NULL;
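
For reference, the same lookup can be table-driven instead of a long
switch; a minimal sketch of that alternative (our own illustration, not
part of this patch — it reuses the testcase enum values from the hunk
above, and the function name is ours):

	static const char * const testcase_names[] = {
		[TEST_STOP_DUE_TO_FLUSH] = "\"stop due to flush\"",
		[TEST_RET_ABORT] = "\"err_check return abort\"",
		/* ... one entry per testcase, as in the switch above ... */
		[TEST_LONG_SEQUENTIAL_WRITE] = "\"long sequential write\"",
	};

	static const char *get_test_case_name(unsigned int testcase)
	{
		if (testcase >= ARRAY_SIZE(testcase_names) ||
		    !testcase_names[testcase])
			return "Unknown testcase";
		return testcase_names[testcase];
	}
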
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 34e1d40..75cc086 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -76,4 +76,18 @@
PNP PMIC. It configures the frequency of clkdiv outputs on the
PMIC. These clocks are typically wired through alternate functions
on gpio pins.
+
+config IPA
+ tristate "IPA support"
+ depends on SPS
+ help
+ This driver supports the Internet Packet Accelerator (IPA) core.
+ IPA is a programmable protocol processor HW block.
+ It is designed to support generic HW processing of UL/DL IP packets
+ for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 35efd91..0a755d3 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MSM_SSBI) += ssbi.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_IPA) += ipa/
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..ded5b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+ ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
new file mode 100644
index 0000000..0ae2552
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+static struct a2_service_cb_type {
+ void *tx_complete_cb;
+ void *rx_cb;
+ u32 producer_handle;
+ u32 consumer_handle;
+} a2_service_cb;
+
+static struct sps_mem_buffer data_mem_buf[2];
+static struct sps_mem_buffer desc_mem_buf[2];
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+		struct sps_pipe **pipe);
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl);
+
+/**
+ * a2_mux_initialize() - initialize A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ */
+int a2_mux_initialize(void)
+{
+ (void) msm_bam_dmux_ul_power_vote();
+
+ return 0;
+}
+
+/**
+ * a2_mux_close() - close A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_close(void)
+{
+ int ret = 0;
+
+ (void) msm_bam_dmux_ul_power_unvote();
+
+ ret = ipa_disconnect(a2_service_cb.consumer_handle);
+ if (0 != ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = ipa_disconnect(a2_service_cb.producer_handle);
+ if (0 != ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
+
+/**
+ * a2_mux_open_port() - open connection to A2
+ * @wwan_logical_channel_id: WWAN logical channel ID
+ * @rx_cb: Rx callback
+ * @tx_complete_cb: Tx completed callback
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+ void *tx_complete_cb)
+{
+ int ret = 0;
+ u8 src_pipe = 0;
+ u8 dst_pipe = 0;
+ struct sps_pipe *a2_to_ipa_pipe = NULL;
+ struct sps_pipe *ipa_to_a2_pipe = NULL;
+
+ (void) wwan_logical_channel_id;
+
+ a2_service_cb.rx_cb = rx_cb;
+ a2_service_cb.tx_complete_cb = tx_complete_cb;
+
+ ret = connect_pipe_ipa(A2_TO_IPA,
+ &src_pipe,
+ &(a2_service_cb.consumer_handle),
+			&a2_to_ipa_pipe);
+ if (ret) {
+ pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
+ goto bail;
+ }
+
+ ret = connect_pipe_ipa(IPA_TO_A2,
+ &dst_pipe,
+ &(a2_service_cb.producer_handle),
+			&ipa_to_a2_pipe);
+ if (ret) {
+ pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
+ sps_disconnect(a2_to_ipa_pipe);
+ sps_free_endpoint(a2_to_ipa_pipe);
+ (void) ipa_disconnect(a2_service_cb.consumer_handle);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+		struct sps_pipe **pipe)
+{
+ int ret;
+ struct sps_connect connection = {0, };
+ u32 a2_handle = 0;
+ u32 a2_phy_addr = 0;
+ struct a2_mux_pipe_connection pipe_connection = { 0, };
+ struct ipa_connect_params ipa_in_params;
+ struct ipa_sps_params sps_out_params;
+
+ memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+ memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+	if (!usb_pipe_idx || !clnt_hdl || !pipe) {
+		pr_err("%s: null arguments\n", __func__);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
+ if (ret) {
+ pr_err("ipa_get_a2_mux_pipe_info failed\n");
+ goto bail;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ a2_phy_addr = pipe_connection.src_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+ ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+ ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ pr_err("-*&- pipe_connection->src_pipe_index = %d\n",
+ pipe_connection.src_pipe_index);
+ ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
+ } else {
+ a2_phy_addr = pipe_connection.dst_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+ ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
+ }
+
+ ret = sps_phy2h(a2_phy_addr, &a2_handle);
+ if (ret) {
+ pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.client_bam_hdl = a2_handle;
+ ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
+ ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
+
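+	/*
+	 * Pipes backed by SPS pipe memory must have their data and
+	 * descriptor FIFOs carved out of that shared region before the
+	 * IPA connect below; other memory types leave data/desc as
+	 * zero-initialized here.
+	 */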
+ if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
+ pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
+ ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
+ pipe_connection.data_fifo_base_offset,
+ pipe_connection.data_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: data fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
+ pipe_connection.desc_fifo_base_offset,
+ pipe_connection.desc_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: desc. fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.data = data_mem_buf[pipe_dir];
+ ipa_in_params.desc = desc_mem_buf[pipe_dir];
+ }
+
+ ret = a2_ipa_connect_pipe(&ipa_in_params,
+ &sps_out_params,
+ clnt_hdl);
+ if (ret) {
+ pr_err("-**- USB-IPA info: ipa_connect failed\n");
+ pr_err("%s: usb_ipa_connect_pipe failed\n", __func__);
+ goto bail;
+ }
+
+	*pipe = sps_alloc_endpoint();
+	if (*pipe == NULL) {
+ pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+ ret = -ENOMEM;
+ goto a2_ipa_connect_pipe_failed;
+ }
+
+	ret = sps_get_config(*pipe, &connection);
+ if (ret) {
+ pr_err("%s: tx get config failed %d\n", __func__, ret);
+ goto get_config_failed;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ connection.mode = SPS_MODE_SRC;
+ *usb_pipe_idx = connection.src_pipe_index;
+ connection.source = a2_handle;
+ connection.destination = sps_out_params.ipa_bam_hdl;
+ connection.src_pipe_index = pipe_connection.src_pipe_index;
+ connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
+ } else {
+ connection.mode = SPS_MODE_DEST;
+ *usb_pipe_idx = connection.dest_pipe_index;
+ connection.source = sps_out_params.ipa_bam_hdl;
+ connection.destination = a2_handle;
+ connection.src_pipe_index = sps_out_params.ipa_ep_idx;
+ connection.dest_pipe_index = pipe_connection.dst_pipe_index;
+ }
+
+ connection.event_thresh = 16;
+ connection.data = sps_out_params.data;
+ connection.desc = sps_out_params.desc;
+
+	ret = sps_connect(*pipe, &connection);
+ if (ret < 0) {
+ pr_err("%s: tx connect error %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = 0;
+ goto bail;
+error:
+	sps_disconnect(*pipe);
+get_config_failed:
+	sps_free_endpoint(*pipe);
+a2_ipa_connect_pipe_failed:
+ (void) ipa_disconnect(*clnt_hdl);
+bail:
+ return ret;
+}
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl)
+{
+ return ipa_connect(in_params, out_params, clnt_hdl);
+}
+
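A minimal sketch of how a client would drive this API (the channel id
value and the NULL callbacks are our own placeholders; the three entry
points are the ones declared in a2_service.h below):

	static int a2_example_bring_up(void)
	{
		int ret = a2_mux_initialize();	/* votes for A2 UL power */

		if (ret)
			return ret;

		/* callbacks are opaque void pointers; omitted in this sketch */
		ret = a2_mux_open_port(0, NULL, NULL);
		if (ret)
			a2_mux_close();

		return ret;
	}
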
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
new file mode 100644
index 0000000..80885da
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _A2_SERVICE_H_
+#define _A2_SERVICE_H_
+
+int a2_mux_initialize(void);
+
+int a2_mux_close(void);
+
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+ void *tx_complete_cb);
+
+#endif /* _A2_SERVICE_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
new file mode 100644
index 0000000..8f68ef5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -0,0 +1,1790 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_READ_MAX (16)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+			       (x) == IPA_MODE_MOBILE_AP_WAN || \
+			       (x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_V1_CLK_RATE (92310000UL) /* 92.31 MHz */
+#define IPA_DMA_POOL_SIZE (512)
+#define IPA_DMA_POOL_ALIGNMENT (4)
+#define IPA_DMA_POOL_BOUNDARY (1024)
+#define WLAN_AMPDU_TX_EP (15)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
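+/*
+ * Bytes (including the terminating NUL) needed to store "str",
+ * clamped so the result always fits in IPA_AGGR_MAX_STR_LENGTH.
+ */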
+#define IPA_AGGR_STR_IN_BYTES(str) \
+ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+struct ipa_plat_drv_res {
+ u32 ipa_mem_base;
+ u32 ipa_mem_size;
+ u32 bam_mem_base;
+ u32 bam_mem_size;
+ u32 ipa_irq;
+ u32 bam_irq;
+ u32 ipa_pipe_mem_start_ofst;
+ u32 ipa_pipe_mem_size;
+ struct a2_mux_pipe_connection a2_to_ipa_pipe;
+ struct a2_mux_pipe_connection ipa_to_a2_pipe;
+};
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+static struct of_device_id ipa_plat_drv_match[] = {
+ {
+ .compatible = "qcom,ipa",
+ },
+
+ {
+ }
+};
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct device *ipa_dev;
+
+struct ipa_context *ipa_ctx;
+
+static bool polling_mode;
+module_param(polling_mode, bool, 0644);
+MODULE_PARM_DESC(polling_mode,
+ "1 - pure polling mode; 0 - interrupt+polling mode");
+static uint polling_delay_ms = 50;
+module_param(polling_delay_ms, uint, 0644);
+MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");
+static bool hdr_tbl_lcl = 1;
+module_param(hdr_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
+static bool ip4_rt_tbl_lcl = 1;
+module_param(ip4_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_rt_tbl_lcl,
+ "where ip4 rt tables reside 1-local; 0-system");
+static bool ip6_rt_tbl_lcl = 1;
+module_param(ip6_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_rt_tbl_lcl,
+ "where ip6 rt tables reside 1-local; 0-system");
+static bool ip4_flt_tbl_lcl = 1;
+module_param(ip4_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_flt_tbl_lcl,
+ "where ip4 flt tables reside 1-local; 0-system");
+static bool ip6_flt_tbl_lcl = 1;
+module_param(ip6_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_flt_tbl_lcl,
+ "where ip6 flt tables reside 1-local; 0-system");
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+ enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pdata);
+
+static int ipa_update_connections_info(struct device_node *node,
+ struct a2_mux_pipe_connection *pipe_connection);
+
+static void ipa_set_aggregation_params(void);
+
+static ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ u32 reg_val = 0xfeedface;
+ char str[IPA_READ_MAX];
+ int result;
+ static int read_cnt;
+
+ if (read_cnt) {
+ IPAERR("only supports one call to read\n");
+ return 0;
+ }
+
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+ result = scnprintf(str, IPA_READ_MAX, "%x\n", reg_val);
+ if (copy_to_user(buf, str, result))
+ return -EFAULT;
+ read_cnt = 1;
+
+ return result;
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_context *ctx = NULL;
+
+ IPADBG("ENTER\n");
+ ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+ filp->private_data = ctx;
+
+ return 0;
+}
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 header[128] = { 0 };
+ u8 *param = NULL;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+ struct ipa_ioc_v4_nat_init nat_init;
+ struct ipa_ioc_v4_nat_del nat_del;
+
+ IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+ return -ENOTTY;
+
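+	/*
+	 * Variable-length commands are copied from user space in two
+	 * stages: first a fixed-size header to learn the element count,
+	 * then the full payload into a kzalloc'd buffer sized from it.
+	 */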
+ switch (cmd) {
+ case IPA_IOC_ALLOC_NAT_MEM:
+ if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_V4_INIT_NAT:
+ if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_init))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_nat_init_cmd(&nat_init)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_NAT_DMA:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz =
+ sizeof(struct ipa_ioc_nat_dma_cmd) +
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+ sizeof(struct ipa_ioc_nat_dma_one);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_V4_DEL_NAT:
+ if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_nat_del_cmd(&nat_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr) +
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+ sizeof(struct ipa_hdr_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr) +
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+ sizeof(struct ipa_hdr_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule) +
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+ sizeof(struct ipa_rt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_flt_rule) +
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_COMMIT_HDR:
+ retval = ipa_commit_hdr();
+ break;
+ case IPA_IOC_RESET_HDR:
+ retval = ipa_reset_hdr();
+ break;
+ case IPA_IOC_COMMIT_RT:
+ retval = ipa_commit_rt(arg);
+ break;
+ case IPA_IOC_RESET_RT:
+ retval = ipa_reset_rt(arg);
+ break;
+ case IPA_IOC_COMMIT_FLT:
+ retval = ipa_commit_flt(arg);
+ break;
+ case IPA_IOC_RESET_FLT:
+ retval = ipa_reset_flt(arg);
+ break;
+ case IPA_IOC_DUMP:
+ ipa_dump();
+ break;
+ case IPA_IOC_GET_RT_TBL:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_RT_TBL:
+ retval = ipa_put_rt_tbl(arg);
+ break;
+ case IPA_IOC_GET_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_HDR:
+ retval = ipa_put_hdr(arg);
+ break;
+ case IPA_IOC_SET_FLT:
+ retval = ipa_cfg_filter(arg);
+ break;
+ case IPA_IOC_COPY_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ default: /* redundant, as cmd was checked against MAXNR */
+ return -ENOTTY;
+ }
+ kfree(param);
+
+ return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to A5 */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+ if (ipa_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /*
+ * because these tables are the very first to be added, they will both
+ * have the same index (0) which is essential for programming the
+ * "route" end-point config
+ */
+
+ kfree(rt_rule);
+
+ return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ struct ipa_route route = { 0 };
+ int ret;
+
+ /* install the basic exception header */
+ hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdr) {
+ IPAERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+ strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+
+ /*
+ * only single stream for MBIM supported and no exception packets
+ * expected so set default header to zero
+ */
+ hdr_entry->hdr_len = 1;
+ hdr_entry->hdr[0] = 0;
+
+ /*
+ * SW does not know anything about default exception header so
+ * we don't set it. IPA HW will use it as a template
+ */
+ if (ipa_add_hdr(hdr)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* exception packets go to the LAN-WAN pipe from IPA to A5 */
+ route.route_def_pipe = IPA_A5_LAN_WAN_IN;
+ route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+ if (ipa_cfg_route(&route)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static void ipa_handle_tx_poll_for_pipe(struct ipa_sys_context *sys)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt, *t;
+ struct sps_iovec iov;
+ unsigned long irq_flags;
+ int ret;
+
+ while (1) {
+ iov.addr = 0;
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ pr_err("%s: sps_get_iovec failed %d\n", __func__, ret);
+ break;
+ }
+ if (!iov.addr)
+ break;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ tx_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper, link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
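+		/*
+		 * cnt tracks multi-descriptor sets: 1 completes a packet
+		 * immediately, 0xFFFF marks the end of a set (move the
+		 * waiting descriptors back, then complete), and any other
+		 * value parks a middle descriptor on the wait list.
+		 */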
+ switch (tx_pkt->cnt) {
+ case 1:
+ ipa_write_done(&tx_pkt->work);
+ break;
+ case 0xFFFF:
+ /* reached end of set */
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_for_each_entry_safe(tx_pkt, t,
+ &sys->wait_desc_list, link) {
+ list_del(&tx_pkt->link);
+ list_add(&tx_pkt->link, &sys->head_desc_list);
+ }
+ tx_pkt =
+ list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper, link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ ipa_write_done(&tx_pkt->work);
+ break;
+ default:
+ /* keep looping till reach the end of the set */
+ spin_lock_irqsave(&sys->spinlock,
+ irq_flags);
+ list_del(&tx_pkt->link);
+ list_add_tail(&tx_pkt->link,
+ &sys->wait_desc_list);
+ spin_unlock_irqrestore(&sys->spinlock,
+ irq_flags);
+ break;
+ }
+ }
+}
+
+static void ipa_poll_function(struct work_struct *work)
+{
+ int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
+ IPA_A5_WLAN_AMPDU_OUT };
+ int i;
+ int num_tx_pipes;
+
+ /* check all the system pipes for tx completions and rx available */
+ if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
+ ipa_handle_rx_core();
+
+	num_tx_pipes = ARRAY_SIZE(tx_pipes);
+
+ if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
+ num_tx_pipes--;
+
+ for (i = 0; i < num_tx_pipes; i++)
+ if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
+ ipa_handle_tx_poll_for_pipe(&ipa_ctx->sys[tx_pipes[i]]);
+
+ /* re-post the poll work */
+ INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+	schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
+				 msecs_to_jiffies(polling_delay_ms));
+}
+
+static int ipa_setup_a5_pipes(void)
+{
+ struct ipa_sys_connect_params sys_in;
+ int result = 0;
+
+ /* CMD OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_CMD_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail;
+ }
+
+ if (ipa_setup_exception_path()) {
+ IPAERR(":fail to setup excp path\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+
+ /* LAN-WAN IN (IPA->A5) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = 8; /* size of A5 exception hdr */
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+ /* LAN-WAN OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_data_out;
+ }
+ if (ipa_ctx->polling_mode) {
+ INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+ result =
+ schedule_delayed_work_on(smp_processor_id(),
+ &ipa_ctx->poll_work,
+ msecs_to_jiffies(polling_delay_ms));
+ if (!result) {
+ IPAERR(":schedule delayed work failed.\n");
+ goto fail_schedule_delayed_work;
+ }
+ }
+
+ return 0;
+
+fail_schedule_delayed_work:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+fail_data_out:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_cmd:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail:
+ return result;
+}
+
+static void ipa_teardown_a5_pipes(void)
+{
+ cancel_delayed_work(&ipa_ctx->poll_work);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+ enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pdata)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int rc = 0;
+
+	if (!pdata || !pdev) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+ /* retrieve device tree parameters */
+	for_each_child_of_node(pdev->dev.of_node, node) {
+ const char *str;
+
+ rc = of_property_read_string(node, "label", &str);
+ if (rc) {
+ IPAERR("Cannot read string\n");
+ goto err;
+ }
+
+ /* Check if connection type is supported */
+ if (strncmp(str, "a2-to-ipa", 10)
+ && strncmp(str, "ipa-to-a2", 10))
+ goto err;
+
+ if (strnstr(str, "a2-to-ipa", strnlen("a2-to-ipa", 10))
+ && IPA_TO_A2 == pipe_dir)
+ continue; /* skip to the next pipe */
+ else if (strnstr(str, "ipa-to-a2", strnlen("ipa-to-a2", 10))
+ && A2_TO_IPA == pipe_dir)
+ continue; /* skip to the next pipe */
+
+ rc = ipa_update_connections_info(node, pdata);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+err:
+ IPAERR("%s: failed\n", __func__);
+
+ return rc;
+}
+
+static int ipa_update_connections_info(struct device_node *node,
+ struct a2_mux_pipe_connection *pipe_connection)
+{
+	int rc;
+	char *key;
+	uint32_t val;
+
+	if (!pipe_connection || !node)
+		return -EINVAL;
+
+ key = "qcom,src-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_phy_addr = val;
+
+ key = "qcom,ipa-bam-mem-type";
+ rc = of_property_read_u32(node, key, &mem_type);
+ if (rc)
+ goto err;
+ pipe_connection->mem_type = mem_type;
+
+ key = "qcom,src-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_pipe_index = val;
+
+ key = "qcom,dst-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_phy_addr = val;
+
+ key = "qcom,dst-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_pipe_index = val;
+
+ key = "qcom,data-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_base_offset = val;
+
+ key = "qcom,data-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_size = val;
+
+ key = "qcom,descriptor-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->desc_fifo_base_offset = val;
+
+ key = "qcom,descriptor-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+
+ pipe_connection->desc_fifo_size = val;
+
+ return 0;
+err:
+ IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+
+ return rc;
+}
+
+/**
+* ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
+*
+* @pipe_dir: pipe direction
+* @pipe_connect: connect structure containing the parameters fetched from DTS
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pipe_connect)
+{
+ if (!pipe_connect) {
+ IPAERR("ipa_get_a2_mux_pipe_info switch null args\n");
+ return -EFAULT;
+ }
+
+ switch (pipe_dir) {
+ case A2_TO_IPA:
+ *pipe_connect = ipa_res.a2_to_ipa_pipe;
+ break;
+ case IPA_TO_A2:
+ *pipe_connect = ipa_res.ipa_to_a2_pipe;
+ break;
+ default:
+ IPAERR("ipa_get_a2_mux_pipe_info switch in default\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void ipa_set_aggregation_params(void)
+{
+ struct ipa_ep_cfg_aggr agg_params;
+ u32 producer_hdl = 0;
+ u32 consumer_hdl = 0;
+
+ rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+
+ agg_params.aggr = ipa_ctx->aggregation_type;
+ agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
+ agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
+
+ /* configure aggregation on producer */
+ agg_params.aggr_en = IPA_ENABLE_AGGR;
+ ipa_cfg_ep_aggr(producer_hdl, &agg_params);
+
+ /* configure deaggregation on consumer */
+ agg_params.aggr_en = IPA_ENABLE_DEAGGR;
+ ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
+}
+
+/*
+ * The following device attributes are for configuring the aggregation
+ * attributes when the driver is already running.
+ * The attributes are for configuring the aggregation type
+ * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
+ * time limit.
+ */
+static ssize_t ipa_show_aggregation_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+ char str[IPA_AGGR_MAX_STR_LENGTH];
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
+ return -EINVAL;
+ }
+
+ memset(str, 0, sizeof(str));
+
+ switch (ipa_ctx->aggregation_type) {
+ case IPA_MBIM_16:
+ strlcpy(str, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16"));
+ break;
+ case IPA_MBIM_32:
+ strlcpy(str, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32"));
+ break;
+ case IPA_TLP:
+ strlcpy(str, "TLP", IPA_AGGR_STR_IN_BYTES("TLP"));
+ break;
+ default:
+ strlcpy(str, "NONE", IPA_AGGR_STR_IN_BYTES("NONE"));
+ break;
+ }
+
+ ret_val = scnprintf(buf, PAGE_SIZE, "%s\n", str);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+ if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
+ ipa_ctx->aggregation_type = IPA_MBIM_32;
+ else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
+ ipa_ctx->aggregation_type = IPA_TLP;
+ else {
+ IPAERR("ipa_store_aggregation_type wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_type,
+ ipa_store_aggregation_type);
+
+static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ ret_val = scnprintf(buf, PAGE_SIZE, "%u\n",
+ ipa_ctx->aggregation_byte_limit);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH];
+ char *pstr;
+ u32 ret = 0;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+	if (kstrtouint(pstr, 10, &ret)) {
+ IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_ctx->aggregation_byte_limit = ret;
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_byte_limit,
+ ipa_store_aggregation_byte_limit);
+
+static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ ret_val = scnprintf(buf,
+ PAGE_SIZE,
+ "%u\n",
+ ipa_ctx->aggregation_time_limit);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+ u32 ret = 0;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+	if (kstrtouint(pstr, 10, &ret)) {
+ IPAERR("ipa_store_aggregation_time_limit wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_ctx->aggregation_time_limit = ret;
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_time_limit,
+ ipa_store_aggregation_time_limit);
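+
+/*
+ * Example usage from user space, assuming these attributes are
+ * registered on the IPA device (registration is not shown here):
+ *   echo MBIM_16 > /sys/.../aggregation_type
+ *   echo 16384 > /sys/.../aggregation_byte_limit
+ * Each store re-applies the parameters via ipa_set_aggregation_params().
+ */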
+
+static const struct file_operations ipa_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_open,
+ .read = ipa_read,
+ .unlocked_ioctl = ipa_ioctl,
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+ ipa_cnoc_clk = clk_get(dev, "iface_clk");
+ if (IS_ERR(ipa_cnoc_clk)) {
+ ipa_cnoc_clk = NULL;
+ IPAERR("fail to get cnoc clk\n");
+ return -ENODEV;
+ }
+
+ ipa_clk_src = clk_get(dev, "core_src_clk");
+ if (IS_ERR(ipa_clk_src)) {
+ ipa_clk_src = NULL;
+ IPAERR("fail to get ipa clk src\n");
+ return -ENODEV;
+ }
+
+ ipa_clk = clk_get(dev, "core_clk");
+ if (IS_ERR(ipa_clk)) {
+ ipa_clk = NULL;
+ IPAERR("fail to get ipa clk\n");
+ return -ENODEV;
+ }
+
+ sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+ if (IS_ERR(sys_noc_ipa_axi_clk)) {
+ sys_noc_ipa_axi_clk = NULL;
+ IPAERR("fail to get sys_noc_ipa_axi clk\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
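+	/*
+	 * Common clock framework two-step: clk_prepare() may sleep,
+	 * clk_enable() is atomic. A NULL handle means ipa_get_clks()
+	 * was never called or failed, hence the WARN_ON()s.
+	 */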
+ if (ipa_cnoc_clk) {
+ clk_prepare(ipa_cnoc_clk);
+ clk_enable(ipa_cnoc_clk);
+ clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+ } else {
+ WARN_ON(1);
+ }
+
+ if (ipa_clk_src)
+ clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_prepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_prepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_enable(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_enable(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+ if (sys_noc_ipa_axi_clk)
+ clk_disable_unprepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_disable_unprepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_cnoc_clk)
+ clk_disable_unprepare(ipa_cnoc_clk);
+ else
+ WARN_ON(1);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+ void *bam_cnfg_bits;
+
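+	/* one-shot programming of BAM_CNFG_BITS: map, write, unmap */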
+ bam_cnfg_bits = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+ IPA_BAM_REMAP_SIZE);
+ if (!bam_cnfg_bits)
+ return -ENOMEM;
+ ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
+ IPA_BAM_CNFG_BITS_VAL);
+ iounmap(bam_cnfg_bits);
+
+ return 0;
+}
+
+/**
+ * ipa_init() - Initialize the IPA driver
+ * @resource_p: contains platform-specific values from the DTS file
+ *
+ * Initialization sequence:
+ * - Allocate memory for the driver context data struct
+ * - Initialize ipa_ctx with:
+ *   1) parsed values from the DTS file
+ *   2) parameters passed on module initialization
+ *   3) values read from HW (such as core memory size)
+ * - Map IPA core registers to CPU memory
+ * - Restart the IPA core (HW reset)
+ * - Register the IPA BAM with the SPS driver and get a BAM handle
+ * - Set the IPA BAM configuration via BAM_CNFG_BITS
+ * - Initialize the look-aside caches (kmem_cache/slab) for filter,
+ *   routing and IPA-tree entries
+ * - Create a memory pool with 4 objects for DMA operations (each object
+ *   is 512 bytes long); these objects are used for tx (A5->IPA)
+ * - Initialize list heads (routing, filter, hdr, system pipes)
+ * - Initialize mutexes (for ipa_ctx and NAT memory)
+ * - Initialize spinlocks (for lists related to the A5<->IPA pipes)
+ * - Initialize two single-threaded work queues, "ipa rx wq" and "ipa tx wq"
+ * - Initialize red-black trees for the handles of headers, routing rules,
+ *   routing tables and filtering rules
+ * - Set up all A5<->IPA pipes by calling ipa_setup_a5_pipes
+ * - Prepare the descriptors for the system pipes
+ * - Initialize the filter block by committing the IPv4 and IPv6 default rules
+ * - Create an empty routing table in system memory (not committed)
+ * - Initialize the pipe memory pool via ipa_pipe_mem_init on supported
+ *   platforms
+ * - Create a char device for IPA
+ */
+static int ipa_init(const struct ipa_plat_drv_res *resource_p)
+{
+ int result = 0;
+ int i;
+ struct sps_bam_props bam_props = { 0 };
+ struct ipa_flt_tbl *flt_tbl;
+ struct ipa_rt_tbl_set *rset;
+
+ IPADBG("IPA init\n");
+
+ ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+ if (!ipa_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+
+ IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
+ ipa_ctx->polling_mode = polling_mode;
+ IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+ hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
+ ip6_flt_tbl_lcl);
+ ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
+ ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
+ ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
+ ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
+ ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
+
+ ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+
+ /* setup IPA register access */
+ ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
+ resource_p->ipa_mem_size);
+ if (!ipa_ctx->mmio) {
+ IPAERR(":ipa-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_remap;
+ }
+ /* do POR programming to setup HW */
+ result = ipa_init_hw();
+ if (result) {
+ IPAERR(":error initializing driver.\n");
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+ /* read how much SRAM is available for SW use */
+ ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST);
+
+ if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
+ IPAERR("SW expect more core memory, needed %d, avail %d\n",
+ IPA_RAM_END_OFST, ipa_ctx->smem_sz);
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+ /* register IPA with SPS driver */
+ bam_props.phys_addr = resource_p->bam_mem_base;
+ bam_props.virt_addr = ioremap(resource_p->bam_mem_base,
+ resource_p->bam_mem_size);
+ if (!bam_props.virt_addr) {
+ IPAERR(":bam-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_bam_remap;
+ }
+ bam_props.virt_size = resource_p->bam_mem_size;
+ bam_props.irq = resource_p->bam_irq;
+ bam_props.num_pipes = IPA_NUM_PIPES;
+ bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+ bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+
+ result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+ if (result) {
+ IPAERR(":bam register err.\n");
+ result = -ENODEV;
+ goto fail_bam_register;
+ }
+
+ if (ipa_setup_bam_cfg(resource_p)) {
+ IPAERR(":bam cfg err.\n");
+ result = -ENODEV;
+ goto fail_flt_rule_cache;
+ }
+
+ /* set up the default op mode */
+ ipa_ctx->mode = IPA_MODE_USB_DONGLE;
+
+ /* init the lookaside cache */
+ ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
+ sizeof(struct ipa_flt_entry), 0, 0, NULL);
+ if (!ipa_ctx->flt_rule_cache) {
+ IPAERR(":ipa flt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_flt_rule_cache;
+ }
+ ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
+ sizeof(struct ipa_rt_entry), 0, 0, NULL);
+ if (!ipa_ctx->rt_rule_cache) {
+ IPAERR(":ipa rt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_rule_cache;
+ }
+ ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
+ sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_cache) {
+ IPAERR(":ipa hdr cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_cache;
+ }
+ ipa_ctx->hdr_offset_cache =
+ kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
+ 0, 0, NULL);
+ if (!ipa_ctx->hdr_offset_cache) {
+ IPAERR(":ipa hdr off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_offset_cache;
+ }
+ ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
+ sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+ if (!ipa_ctx->rt_tbl_cache) {
+ IPAERR(":ipa rt tbl cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_tbl_cache;
+ }
+ ipa_ctx->tx_pkt_wrapper_cache =
+ kmem_cache_create("IPA TX PKT WRAPPER",
+ sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->tx_pkt_wrapper_cache) {
+ IPAERR(":ipa tx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tx_pkt_wrapper_cache;
+ }
+ ipa_ctx->rx_pkt_wrapper_cache =
+ kmem_cache_create("IPA RX PKT WRAPPER",
+ sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->rx_pkt_wrapper_cache) {
+ IPAERR(":ipa rx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rx_pkt_wrapper_cache;
+ }
+ ipa_ctx->tree_node_cache =
+ kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
+ NULL);
+ if (!ipa_ctx->tree_node_cache) {
+ IPAERR(":ipa tree node cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tree_node_cache;
+ }
+
+ /*
+ * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
+ * size 512 bytes
+ */
+ ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k", NULL,
+ IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
+ IPA_DMA_POOL_BOUNDARY);
+ if (!ipa_ctx->one_kb_no_straddle_pool) {
+ IPAERR("cannot setup 1kb alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
+ }
+
+ ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+ ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+ /* init the various list heads */
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+ }
+
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+ mutex_init(&ipa_ctx->lock);
+ mutex_init(&ipa_ctx->nat_mem.lock);
+
+ for (i = 0; i < IPA_A5_SYS_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
+ spin_lock_init(&ipa_ctx->sys[i].spinlock);
+ if (i != IPA_A5_WLAN_AMPDU_OUT)
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
+ else
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].wait_desc_list);
+ }
+
+ ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
+ if (!ipa_ctx->rx_wq) {
+ IPAERR(":fail to create rx wq\n");
+ result = -ENOMEM;
+ goto fail_rx_wq;
+ }
+
+ ipa_ctx->tx_wq = create_singlethread_workqueue("ipa tx wq");
+ if (!ipa_ctx->tx_wq) {
+ IPAERR(":fail to create tx wq\n");
+ result = -ENOMEM;
+ goto fail_tx_wq;
+ }
+
+ ipa_ctx->hdr_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
+ ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
+
+ atomic_set(&ipa_ctx->ipa_active_clients, 0);
+
+ result = ipa_bridge_init();
+ if (result) {
+ IPAERR("ipa bridge init err.\n");
+ result = -ENODEV;
+ goto fail_bridge_init;
+ }
+
+ /* setup the A5-IPA pipes */
+ if (ipa_setup_a5_pipes()) {
+ IPAERR(":failed to setup IPA-A5 pipes.\n");
+ result = -ENODEV;
+ goto fail_a5_pipes;
+ }
+
+ ipa_replenish_rx_cache();
+
+ /* init the filtering block */
+ ipa_commit_flt(IPA_IP_v4);
+ ipa_commit_flt(IPA_IP_v6);
+
+ /*
+ * setup an empty routing table in system memory, this will be used
+ * to delete a routing table cleanly and safely
+ */
+ ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+ ipa_ctx->empty_rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+ &ipa_ctx->empty_rt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!ipa_ctx->empty_rt_tbl_mem.base) {
+ IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+ ipa_ctx->empty_rt_tbl_mem.size);
+ result = -ENOMEM;
+ goto fail_empty_rt_tbl;
+ }
+ memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+ ipa_ctx->empty_rt_tbl_mem.size);
+
+ /* setup the IPA pipe mem pool */
+ ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+ resource_p->ipa_pipe_mem_size);
+
+ ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+ result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+ ipa_ctx, DRV_NAME);
+ if (IS_ERR(ipa_ctx->dev)) {
+ IPAERR(":device_create err.\n");
+ result = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+ ipa_ctx->cdev.owner = THIS_MODULE;
+ ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ /* default aggregation parameters */
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ ipa_ctx->aggregation_byte_limit = 1;
+ ipa_ctx->aggregation_time_limit = 0;
+ IPADBG(":IPA driver init OK.\n");
+
+ /* gate IPA clocks */
+ ipa_disable_clks();
+
+ return 0;
+
+fail_cdev_add:
+ device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ if (ipa_ctx->pipe_mem_pool)
+ gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+ dma_free_coherent(NULL,
+ ipa_ctx->empty_rt_tbl_mem.size,
+ ipa_ctx->empty_rt_tbl_mem.base,
+ ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_empty_rt_tbl:
+ ipa_cleanup_rx();
+ ipa_teardown_a5_pipes();
+fail_a5_pipes:
+ ipa_bridge_cleanup();
+fail_bridge_init:
+ destroy_workqueue(ipa_ctx->tx_wq);
+fail_tx_wq:
+ destroy_workqueue(ipa_ctx->rx_wq);
+fail_rx_wq:
+ dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+fail_dma_pool:
+ kmem_cache_destroy(ipa_ctx->tree_node_cache);
+fail_tree_node_cache:
+ kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+ kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+ kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+ sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_bam_register:
+ iounmap(bam_props.virt_addr);
+fail_bam_remap:
+fail_init_hw:
+ iounmap(ipa_ctx->mmio);
+fail_remap:
+ kfree(ipa_ctx);
+ ipa_ctx = NULL;
+fail_mem:
+ /* gate IPA clocks */
+ ipa_disable_clks();
+ return result;
+}
+
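The unwind path above follows the usual kernel goto-on-error idiom: each
failure label releases only what was successfully acquired before the
corresponding failure point, so teardown runs in strict reverse order of
acquisition. A minimal sketch of the idiom, using hypothetical
acquire_*/release_* helpers rather than the driver's real resources:

	static int init_example(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto fail_a;
		ret = acquire_b();
		if (ret)
			goto fail_b;
		ret = acquire_c();
		if (ret)
			goto fail_c;
		return 0;

	fail_c:
		release_b();	/* undo in reverse order of setup */
	fail_b:
		release_a();
	fail_a:
		return ret;
	}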
+static int ipa_plat_drv_probe(struct platform_device *pdev_p)
+{
+ int result = 0;
+ struct resource *resource_p;
+ IPADBG("IPA plat drv probe\n");
+
+ /* initialize ipa_res */
+ ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+ ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+
+ result = ipa_load_pipe_connection(pdev_p,
+ A2_TO_IPA,
+ &ipa_res.a2_to_ipa_pipe);
+ if (0 != result)
+ IPAERR(":ipa_load_pipe_connection failed!\n");
+
+ result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
+ &ipa_res.ipa_to_a2_pipe);
+ if (0 != result)
+ IPAERR(":ipa_load_pipe_connection failed!\n");
+
+ /* Get IPA wrapper address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "ipa-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for ipa-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.ipa_mem_base = resource_p->start;
+ ipa_res.ipa_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA BAM address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "bam-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for bam-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.bam_mem_base = resource_p->start;
+ ipa_res.bam_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA pipe mem start ofst */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "ipa-pipe-mem");
+
+ if (!resource_p) {
+ IPADBG(":get resource failed for ipa-pipe-mem\n");
+ } else {
+ ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
+ ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "ipa-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for ipa-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.ipa_irq = resource_p->start;
+ }
+
+ /* Get IPA BAM IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "bam-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for bam-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.bam_irq = resource_p->start;
+ }
+
+ IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
+ ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
+ IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
+ ipa_res.bam_mem_base, ipa_res.bam_mem_size);
+ IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
+ ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
+
+ IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
+ IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
+
+ /* stash the IPA dev ptr */
+ ipa_dev = &pdev_p->dev;
+
+ /* get IPA clocks */
+ if (ipa_get_clks(ipa_dev) != 0)
+ return -ENODEV;
+
+ /* enable IPA clocks */
+ ipa_enable_clks();
+
+ /* Proceed to real initialization */
+ result = ipa_init(&ipa_res);
+ if (result)
+ IPAERR("ipa_init failed\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_type);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_byte_limit);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_time_limit);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ return result;
+}
+
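Because the probe above resolves every resource by name, a matching device
tree node needs reg-names and interrupt-names entries along these lines; the
unit address, register sizes and interrupt numbers below are illustrative
placeholders, and the optional "ipa-pipe-mem" region is omitted so the
compile-time defaults apply:

	qcom,ipa@fd4c0000 {
		compatible = "qcom,ipa";
		reg = <0xfd4c0000 0x26000>,
		      <0xfd4c4000 0x14818>;
		reg-names = "ipa-base", "bam-base";
		interrupts = <0 252 0>, <0 253 0>;
		interrupt-names = "ipa-irq", "bam-irq";
	};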
+static struct platform_driver ipa_plat_drv = {
+ .probe = ipa_plat_drv_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipa_plat_drv_match,
+ },
+};
+
+static int ipa_plat_drv_init(void)
+{
+ return platform_driver_register(&ipa_plat_drv);
+}
+
+struct ipa_context *ipa_get_ctx(void)
+{
+ return ipa_ctx;
+}
+
+static int __init ipa_module_init(void)
+{
+ int result = 0;
+
+ IPADBG("IPA module init\n");
+ ipa_debugfs_init();
+ /* Register as a platform device driver */
+ result = ipa_plat_drv_init();
+
+ return result;
+}
+
+late_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
new file mode 100644
index 0000000..cf51ab6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include "ipa_i.h"
+
+enum ipa_bridge_id {
+ IPA_DL_FROM_A2,
+ IPA_DL_TO_IPA,
+ IPA_UL_FROM_IPA,
+ IPA_UL_TO_A2,
+ IPA_BRIDGE_ID_MAX
+};
+
+static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
+static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
+static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+
+struct ipa_pkt_info {
+ void *buffer;
+ dma_addr_t dma_address;
+ uint32_t len;
+ struct list_head list_node;
+};
+
+struct ipa_bridge_pipe_context {
+ struct list_head head_desc_list;
+ struct sps_pipe *pipe;
+ struct sps_connect connection;
+ struct sps_mem_buffer desc_mem_buf;
+ struct sps_register_event register_event;
+ spinlock_t spinlock;
+ u32 len;
+ u32 free_len;
+ struct list_head free_desc_list;
+};
+
+static struct ipa_bridge_pipe_context bridge[IPA_BRIDGE_ID_MAX];
+
+static struct workqueue_struct *ipa_ul_workqueue;
+static struct workqueue_struct *ipa_dl_workqueue;
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir);
+
+static u32 alloc_cnt[IPA_DIR_MAX];
+
+static void ul_work_func(struct work_struct *work)
+{
+ ipa_do_bridge_work(IPA_UL);
+}
+
+static void dl_work_func(struct work_struct *work)
+{
+ ipa_do_bridge_work(IPA_DL);
+}
+
+static DECLARE_WORK(ul_work, ul_work_func);
+static DECLARE_WORK(dl_work, dl_work_func);
+
+static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
+{
+ int ret;
+ struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->register_event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir)
+{
+ int ret;
+ struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+static int queue_rx_single(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+ struct ipa_pkt_info *info;
+ int ret;
+
+ info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
+ if (!info) {
+ IPAERR("unable to alloc rx_pkt_info\n");
+ goto fail_pkt;
+ }
+
+ info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!info->buffer) {
+ IPAERR("unable to alloc rx_pkt_buffer\n");
+ goto fail_buffer;
+ }
+
+ info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (info->dma_address == 0 || info->dma_address == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)info->dma_address, info->buffer);
+ goto fail_dma;
+ }
+
+ info->len = ~0;
+
+ list_add_tail(&info->list_node, &sys_rx->head_desc_list);
+ ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
+ IPA_RX_SKB_SIZE, info,
+ SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ list_del(&info->list_node);
+ dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_dma;
+ }
+ sys_rx->len++;
+ return 0;
+
+fail_dma:
+ kfree(info->buffer);
+fail_buffer:
+ kfree(info);
+fail_pkt:
+ IPAERR("failed\n");
+ return -ENOMEM;
+}
+
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+ struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
+ struct ipa_pkt_info *tx_pkt;
+ struct ipa_pkt_info *rx_pkt;
+ struct ipa_pkt_info *tmp_pkt;
+ struct sps_iovec iov;
+ int ret;
+ int inactive_cycles = 0;
+
+ while (1) {
+ ++inactive_cycles;
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_tx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ /* no-op */
+ } else {
+ inactive_cycles = 0;
+
+ tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_move_tail(&tx_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->len--;
+ sys_tx->free_len++;
+ tx_pkt->len = ~0;
+ }
+
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_rx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ /* no-op */
+ } else {
+ inactive_cycles = 0;
+
+ rx_pkt = list_first_entry(&sys_rx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_del(&rx_pkt->list_node);
+ sys_rx->len--;
+ rx_pkt->len = iov.size;
+
+retry_alloc_tx:
+ if (list_empty(&sys_tx->free_desc_list)) {
+ tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
+ GFP_KERNEL);
+ if (!tmp_pkt) {
+ pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+ __func__);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_alloc_tx;
+ }
+
+ tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp_pkt->buffer) {
+ pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+ __func__);
+ kfree(tmp_pkt);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_alloc_tx;
+ }
+
+ tmp_pkt->dma_address = dma_map_single(NULL,
+ tmp_pkt->buffer,
+ IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (tmp_pkt->dma_address == 0 ||
+ tmp_pkt->dma_address == ~0) {
+ pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+ __func__,
+ (void *)tmp_pkt->dma_address,
+ tmp_pkt->buffer);
+ }
+
+ list_add_tail(&tmp_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->free_len++;
+ alloc_cnt[dir]++;
+
+ tmp_pkt->len = ~0;
+ }
+
+ tx_pkt = list_first_entry(&sys_tx->free_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_del(&tx_pkt->list_node);
+ sys_tx->free_len--;
+
+retry_add_rx:
+ list_add_tail(&tx_pkt->list_node,
+ &sys_rx->head_desc_list);
+ ret = sps_transfer_one(sys_rx->pipe,
+ tx_pkt->dma_address,
+ IPA_RX_SKB_SIZE,
+ tx_pkt,
+ SPS_IOVEC_FLAG_INT |
+ SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ list_del(&tx_pkt->list_node);
+ pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+ __func__, ret);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_add_rx;
+ }
+ sys_rx->len++;
+
+retry_add_tx:
+ list_add_tail(&rx_pkt->list_node,
+ &sys_tx->head_desc_list);
+ ret = sps_transfer_one(sys_tx->pipe,
+ rx_pkt->dma_address,
+ iov.size,
+ rx_pkt,
+ SPS_IOVEC_FLAG_INT |
+ SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+ __func__, dir);
+ list_del(&rx_pkt->list_node);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_add_tx;
+ }
+ sys_tx->len++;
+ }
+
+ if (inactive_cycles >= polling_inactivity[dir]) {
+ ipa_switch_to_intr_mode(dir);
+ break;
+ }
+ }
+}
+
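The loop above is an inactivity-bounded polling cycle: the pipe stays in poll
mode while tx or rx descriptors keep completing, and falls back to interrupt
mode only after polling_inactivity[dir] consecutive idle iterations. Stripped
to its skeleton (the helper names here are placeholders, not driver
functions):

	while (1) {
		++idle_cycles;
		if (reap_one_completion())	/* any tx or rx progress? */
			idle_cycles = 0;
		if (idle_cycles >= inactivity_threshold) {
			switch_back_to_intr_mode();	/* re-arms the EOT event */
			break;
		}
	}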
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ ipa_switch_to_poll_mode(IPA_UL);
+ queue_work(ipa_ul_workqueue, &ul_work);
+ break;
+ default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+ struct ipa_ep_cfg_mode mode;
+ dma_addr_t dma_addr;
+ int ipa_ep_idx;
+ int ret;
+ int i;
+
+ if (dir == IPA_DL) {
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ IPA_CLIENT_A2_TETHERED_PROD);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ ret = -EINVAL;
+ goto tx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_DL_TO_IPA];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("tx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto tx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("tx get config failed %d\n", ret);
+ goto tx_get_config_failed;
+ }
+
+ sys->connection.source = SPS_DEV_HANDLE_MEM;
+ sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.destination = ipa_ctx->bam_handle;
+ sys->connection.dest_pipe_index = ipa_ep_idx;
+ sys->connection.mode = SPS_MODE_DEST;
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("tx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto tx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("tx connect error %d\n", ret);
+ goto tx_connect_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ ipa_ctx->ep[ipa_ep_idx].valid = 1;
+
+ mode.mode = IPA_DMA;
+ mode.dst = IPA_CLIENT_USB_CONS;
+ ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
+ if (ret < 0) {
+ IPAERR("DMA mode set error %d\n", ret);
+ goto tx_mode_set_failed;
+ }
+
+ return 0;
+
+tx_mode_set_failed:
+ sps_disconnect(sys->pipe);
+tx_connect_failed:
+ dma_free_coherent(NULL, sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+ return ret;
+ } else {
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ IPA_CLIENT_A2_TETHERED_CONS);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ ret = -EINVAL;
+ goto rx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_UL_FROM_IPA];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("rx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto rx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("rx get config failed %d\n", ret);
+ goto rx_get_config_failed;
+ }
+
+ sys->connection.source = ipa_ctx->bam_handle;
+ sys->connection.src_pipe_index = 7;
+ sys->connection.destination = SPS_DEV_HANDLE_MEM;
+ sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.mode = SPS_MODE_SRC;
+ sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("rx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto rx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("rx connect error %d\n", ret);
+ goto rx_connect_failed;
+ }
+
+ sys->register_event.options = SPS_O_EOT;
+ sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+ sys->register_event.xfer_done = NULL;
+ sys->register_event.callback = ipa_rx_notify;
+ sys->register_event.user = NULL;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret < 0) {
+			IPAERR("rx register event error %d\n", ret);
+ goto rx_event_reg_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+ ret = queue_rx_single(dir);
+ if (ret < 0)
+ IPAERR("queue fail %d %d\n", dir, i);
+ }
+
+ return 0;
+
+rx_event_reg_failed:
+ sps_disconnect(sys->pipe);
+rx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+ return ret;
+ }
+}
+
+static void bam_mux_rx_notify(struct sps_event_notify *notify)
+{
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ ipa_switch_to_poll_mode(IPA_DL);
+ queue_work(ipa_dl_workqueue, &dl_work);
+ break;
+ default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+ struct a2_mux_pipe_connection pipe_conn = { 0, };
+ dma_addr_t dma_addr;
+ u32 a2_handle;
+ int ret;
+ int i;
+
+ if (dir == IPA_UL) {
+ ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
+ if (ret) {
+ IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
+ goto tx_alloc_endpoint_failed;
+ }
+
+ ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
+ if (ret) {
+ IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+ goto tx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_UL_TO_A2];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("tx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto tx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("tx get config failed %d\n", ret);
+ goto tx_get_config_failed;
+ }
+
+ sys->connection.source = SPS_DEV_HANDLE_MEM;
+ sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.destination = a2_handle;
+ sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
+ sys->connection.mode = SPS_MODE_DEST;
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("tx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto tx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("tx connect error %d\n", ret);
+ goto tx_connect_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ return 0;
+
+tx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+ return ret;
+	} else { /* dir == IPA_DL */
+
+ ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
+ if (ret) {
+ IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
+ goto rx_alloc_endpoint_failed;
+ }
+
+ ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
+ if (ret) {
+ IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+ goto rx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_DL_FROM_A2];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("rx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto rx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("rx get config failed %d\n", ret);
+ goto rx_get_config_failed;
+ }
+
+ sys->connection.source = a2_handle;
+ sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
+ sys->connection.destination = SPS_DEV_HANDLE_MEM;
+ sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.mode = SPS_MODE_SRC;
+ sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("rx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto rx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("rx connect error %d\n", ret);
+ goto rx_connect_failed;
+ }
+
+ sys->register_event.options = SPS_O_EOT;
+ sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+ sys->register_event.xfer_done = NULL;
+ sys->register_event.callback = bam_mux_rx_notify;
+ sys->register_event.user = NULL;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret < 0) {
+			IPAERR("rx register event error %d\n", ret);
+ goto rx_event_reg_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+
+ for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+ ret = queue_rx_single(dir);
+ if (ret < 0)
+ IPAERR("queue fail %d %d\n", dir, i);
+ }
+
+ return 0;
+
+rx_event_reg_failed:
+ sps_disconnect(sys->pipe);
+rx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+ return ret;
+ }
+}
+
+/**
+ * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
+ * workqueues
+ *
+ * Return codes: 0: success, -ENOMEM: failure
+ */
+int ipa_bridge_init(void)
+{
+ int ret;
+
+ ipa_ul_workqueue = alloc_workqueue("ipa_ul",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ if (!ipa_ul_workqueue) {
+ IPAERR("ipa ul wq alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_ul;
+ }
+
+ ipa_dl_workqueue = alloc_workqueue("ipa_dl",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ if (!ipa_dl_workqueue) {
+ IPAERR("ipa dl wq alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_dl;
+ }
+
+ return 0;
+fail_dl:
+ destroy_workqueue(ipa_ul_workqueue);
+fail_ul:
+ return ret;
+}
+
+/**
+ * ipa_bridge_setup() - setup tethered SW bridge in specified direction
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: success
+ * various negative error codes on errors
+ */
+int ipa_bridge_setup(enum ipa_bridge_dir dir)
+{
+ int ret;
+
+ if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+ ipa_enable_clks();
+
+ if (setup_bridge_to_a2(dir)) {
+ IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
+ ret = -EINVAL;
+ goto bail_a2;
+ }
+
+ if (setup_bridge_to_ipa(dir)) {
+ IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
+ ret = -EINVAL;
+ goto bail_ipa;
+ }
+
+ return 0;
+
+bail_ipa:
+ if (dir == IPA_UL)
+ sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+ else
+ sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+bail_a2:
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+ return ret;
+}
+
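Together with ipa_bridge_teardown() and ipa_bridge_cleanup() below, a caller
would typically bring both directions up in sequence. A minimal sketch,
assuming the IPA_UL/IPA_DL values of enum ipa_bridge_dir from ipa_i.h and
abbreviating error reporting:

	static int tethered_bridge_up(void)
	{
		int ret;

		ret = ipa_bridge_init();
		if (ret)
			return ret;

		ret = ipa_bridge_setup(IPA_UL);
		if (ret)
			goto fail_ul;

		ret = ipa_bridge_setup(IPA_DL);
		if (ret)
			goto fail_dl;

		return 0;

	fail_dl:
		ipa_bridge_teardown(IPA_UL);
	fail_ul:
		ipa_bridge_cleanup();
		return ret;
	}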
+/**
+ * ipa_bridge_teardown() - teardown the tethered bridge in the specified dir
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: always
+ */
+int ipa_bridge_teardown(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+
+ if (dir == IPA_UL) {
+ sys = &bridge[IPA_UL_TO_A2];
+ sps_disconnect(sys->pipe);
+ sys = &bridge[IPA_UL_FROM_IPA];
+ sps_disconnect(sys->pipe);
+ } else {
+ sys = &bridge[IPA_DL_FROM_A2];
+ sps_disconnect(sys->pipe);
+ sys = &bridge[IPA_DL_TO_IPA];
+ sps_disconnect(sys->pipe);
+ }
+
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return 0;
+}
+
+/**
+ * ipa_bridge_cleanup() - de-initialize the tethered bridge
+ *
+ * Return codes:
+ * None
+ */
+void ipa_bridge_cleanup(void)
+{
+ destroy_workqueue(ipa_dl_workqueue);
+ destroy_workqueue(ipa_ul_workqueue);
+}
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
new file mode 100644
index 0000000..823b17d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+ struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+ int result = -EFAULT;
+
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+
+ if (ep->ep_hdl == NULL) {
+ IPAERR("SPS EP alloc failed EP.\n");
+ return -EFAULT;
+ }
+
+ result = sps_get_config(ep->ep_hdl,
+ &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ return -EFAULT;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination =
+ in->client_bam_hdl;
+ ep->connect.source = ipa_ctx->bam_handle;
+ ep->connect.dest_pipe_index =
+ in->client_ep_idx;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = in->client_bam_hdl;
+ ep->connect.destination = ipa_ctx->bam_handle;
+ ep->connect.src_pipe_index = in->client_ep_idx;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+ struct sps_mem_buffer *mem_buff_ptr,
+ bool *fifo_in_pipe_mem_ptr,
+ u32 *fifo_pipe_mem_ofst_ptr,
+ u32 fifo_size, int ipa_ep_idx)
+{
+ dma_addr_t dma_addr;
+ u32 ofst;
+ int result = -EFAULT;
+
+ mem_buff_ptr->size = fifo_size;
+ if (in->pipe_mem_preferred) {
+ if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+ IPAERR("FIFO pipe mem alloc fail ep %u\n",
+ ipa_ep_idx);
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL,
+ mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ } else {
+ memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+ result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+ fifo_size, 1);
+ WARN_ON(result);
+ *fifo_in_pipe_mem_ptr = 1;
+ dma_addr = mem_buff_ptr->phys_base;
+ *fifo_pipe_mem_ofst_ptr = ofst;
+ }
+ } else {
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL, mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ }
+ mem_buff_ptr->phys_base = dma_addr;
+ if (mem_buff_ptr->base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. These peripherals are A2, USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int ipa_ep_idx_dst;
+ int result = -EFAULT;
+ struct ipa_ep_context *ep;
+
+ if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+ ipa_enable_clks();
+
+ if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+ in->client >= IPA_CLIENT_MAX ||
+ in->ipa_ep_cfg.mode.dst >= IPA_CLIENT_MAX ||
+ in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ result = -EINVAL;
+ goto fail;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_PROD(in->client) &&
+ (in->ipa_ep_cfg.mode.mode == IPA_DMA)) {
+ ipa_ep_idx_dst = ipa_get_ep_mapping(ipa_ctx->mode,
+ in->ipa_ep_cfg.mode.dst);
+ if ((ipa_ep_idx_dst == -1) ||
+ (ipa_ctx->ep[ipa_ep_idx_dst].valid)) {
+ IPADBG("dst EP for IPA input pipe doesn't yet exist\n");
+ }
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+ ep->valid = 1;
+ ep->client = in->client;
+ ep->notify = in->notify;
+ ep->priv = in->priv;
+
+ if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to configure SPS.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (in->desc.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+ &ep->desc_fifo_in_pipe_mem,
+ &ep->desc_fifo_pipe_mem_ofst,
+ in->desc_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DESC FIFO.\n");
+ goto desc_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DESC FIFO\n");
+ ep->connect.desc = in->desc;
+ ep->desc_fifo_client_allocated = 1;
+ }
+ IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
+ ep->connect.desc.size);
+
+ if (in->data.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+ &ep->data_fifo_in_pipe_mem,
+ &ep->data_fifo_pipe_mem_ofst,
+ in->data_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DATA FIFO.\n");
+ goto data_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DATA FIFO\n");
+ ep->connect.data = in->data;
+ ep->data_fifo_client_allocated = 1;
+ }
+ IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
+ ep->connect.data.size);
+
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+ ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+
+ result = sps_connect(ep->ep_hdl, &ep->connect);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto sps_connect_fail;
+ }
+
+ sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+ sps->ipa_ep_idx = ipa_ep_idx;
+ *clnt_hdl = ipa_ep_idx;
+ memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+ memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+ return 0;
+
+sps_connect_fail:
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+
+data_mem_alloc_fail:
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+
+desc_mem_alloc_fail:
+ sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail:
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_connect);
+
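A hypothetical peripheral-side caller might look like the sketch below. The
client enum value, BAM handle and pipe index are placeholders a real driver
would obtain from its own BAM registration, and the FIFO sizes are
illustrative:

	struct ipa_connect_params in;
	struct ipa_sps_params sps;
	u32 clnt_hdl;
	int ret;

	memset(&in, 0, sizeof(in));
	in.client = IPA_CLIENT_USB_PROD;	/* assumed client enum value */
	in.client_bam_hdl = usb_bam_hdl;	/* placeholder: peripheral BAM */
	in.client_ep_idx = usb_pipe_idx;	/* placeholder: peripheral pipe */
	in.desc_fifo_sz = 0x800;
	in.data_fifo_sz = 0x2000;

	ret = ipa_connect(&in, &sps, &clnt_hdl);
	if (ret)
		return ret;

	/* the caller completes its own end with sps_connect() using the
	 * returned sps.ipa_bam_hdl / sps.ipa_ep_idx / sps.desc / sps.data,
	 * and later calls ipa_disconnect(clnt_hdl) to tear the pipe down */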
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ result = sps_disconnect(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS disconnect failed.\n");
+ return -EPERM;
+ }
+
+ if (!ep->desc_fifo_client_allocated &&
+ ep->connect.desc.base) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+
+ if (!ep->data_fifo_client_allocated &&
+ ep->connect.data.base) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+
+ result = sps_free_endpoint(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS de-alloc EP failed.\n");
+ return -EPERM;
+ }
+
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
new file mode 100644
index 0000000..43b0178d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -0,0 +1,507 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "ipa_i.h"
+
+
+#define IPA_MAX_MSG_LEN 1024
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static s8 ep_reg_idx;
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_FILTER=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n"
+ "IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST),
+ ipa_read_reg(ipa_ctx->mmio,
+ IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option >= IPA_NUM_PIPES) {
+ IPAERR("bad pipe specified %u\n", option);
+ return count;
+ }
+
+ ep_reg_idx = option;
+
+ return count;
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int start_idx;
+ int end_idx;
+ int size = 0;
+ int ret;
+ loff_t pos;
+
+ /* negative ep_reg_idx means all registers */
+ if (ep_reg_idx < 0) {
+ start_idx = 0;
+ end_idx = IPA_NUM_PIPES;
+ } else {
+ start_idx = ep_reg_idx;
+ end_idx = start_idx + 1;
+ }
+ pos = *ppos;
+ for (i = start_idx; i < end_idx; i++) {
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_n_OFST(i)));
+ *ppos = pos;
+ ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+ nbytes);
+ if (ret < 0)
+ return ret;
+
+ size += ret;
+ ubuf += nbytes;
+ count -= nbytes;
+ }
+
+ *ppos = pos + size;
+ return size;
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_hdr_entry *entry;
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "name:%s len=%d ref=%d partial=%d lcl=%d ofst=%u ",
+ entry->name,
+ entry->hdr_len, entry->ref_cnt,
+ entry->is_partial,
+ ipa_ctx->hdr_tbl_lcl,
+ entry->offset_entry->offset >> 2);
+ for (i = 0; i < entry->hdr_len; i++) {
+ scnprintf(dbg_buff + cnt + nbytes + i * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - i * 2,
+ "%02x", entry->hdr[i]);
+ }
+ scnprintf(dbg_buff + cnt + nbytes + entry->hdr_len * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - entry->hdr_len * 2,
+ "\n");
+ cnt += nbytes + entry->hdr_len * 2 + 1;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static int ipa_attrib_dump(char *buff, size_t sz,
+ struct ipa_rule_attrib *attrib, enum ipa_ip_type ip)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ uint32_t addr[4];
+ uint32_t mask[4];
+ int i;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tos:%d ",
+ attrib->u.v4.tos);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "protocol:%d ",
+ attrib->u.v4.protocol);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.src_addr);
+ mask[0] = htonl(attrib->u.v4.src_addr_mask);
+ nbytes = scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI4 src_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.src_addr[i]);
+ mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI6 src_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.dst_addr);
+ mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+ mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "src_port_range:%u %u ",
+ attrib->src_port_lo,
+ attrib->src_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "dst_port_range:%u %u ",
+ attrib->dst_port_lo,
+ attrib->dst_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "type:%d ",
+ attrib->type);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "code:%d ",
+ attrib->code);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "spi:%x ",
+ attrib->spi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "src_port:%u ",
+ attrib->src_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "dst_port:%u ",
+ attrib->dst_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tc:%d ",
+ attrib->u.v6.tc);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "flow_label:%x ",
+ attrib->u.v6.flow_label);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "next_hdr:%d ",
+ attrib->u.v6.next_hdr);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "metadata:%x metadata_mask:%x",
+ attrib->meta_data, attrib->meta_data_mask);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "frg ");
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(buff + cnt, sz - cnt, "\n");
+ cnt += nbytes;
+
+ return cnt;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 hdr_ofst;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (entry->hdr)
+ hdr_ofst = entry->hdr->offset_entry->offset;
+ else
+ hdr_ofst = 0;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "tbl_idx:%d tbl_name:%s tbl_ref:%u rule_idx:%d dst:%d ep:%d S:%u hdr_ofst[words]:%u attrib_mask:%08x ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt, i, entry->rule.dst,
+ ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst),
+ !ipa_ctx->hdr_tbl_lcl,
+ hdr_ofst >> 2,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i;
+ int j;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ struct ipa_rt_tbl *rt_tbl;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib, ip);
+ i++;
+ }
+
+ for (j = 0; j < IPA_NUM_PIPES; j++) {
+ tbl = &ipa_ctx->flt_tbl[j][ip];
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ j, i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt +=
+ ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+ .read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+ .read = ipa_read_ep_reg,
+ .write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_hdr_ops = {
+ .read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+ .read = ipa_read_rt,
+ .open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_flt_ops = {
+ .read = ipa_read_flt,
+ .open = ipa_open_dbg,
+};
+
+void ipa_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+ &ipa_gen_reg_ops);
+ if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+ IPAERR("fail to create file for debug_fs gen_reg\n");
+ goto fail;
+ }
+
+ dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+ &ipa_ep_reg_ops);
+ if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+ IPAERR("fail to create file for debug_fs ep_reg\n");
+ goto fail;
+ }
+
+ dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+ &ipa_hdr_ops);
+ if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+ IPAERR("fail to create file for debug_fs hdr\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_rt_ops);
+ if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+ IPAERR("fail to create file for debug_fs ip4 rt\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_rt_ops);
+ if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6 rt\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_flt_ops);
+ if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+ IPAERR("fail to create file for debug_fs ip4 flt\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_flt_ops);
+ if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+ IPAERR("fail to create file for debug_fs ip6 flt\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+ if (IS_ERR(dent)) {
+ IPAERR("ipa_debugfs_remove: folder was not created.\n");
+ return;
+ }
+ debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+#endif
+
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
new file mode 100644
index 0000000..c677a6e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+/**
+ * ipa_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work: work_struct used by the work queue
+ */
+void ipa_write_done(struct work_struct *work)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+ unsigned long irq_flags;
+ struct ipa_mem_buffer mult = { 0 };
+ int i;
+ u16 cnt;
+
+ tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+ cnt = tx_pkt->cnt;
+ IPADBG("cnt=%d\n", cnt);
+
+ if (unlikely(cnt == 0))
+ WARN_ON(1);
+
+ if (cnt > 1 && cnt != 0xFFFF)
+ mult = tx_pkt->mult;
+
+ for (i = 0; i < cnt; i++) {
+ if (unlikely(tx_pkt == NULL))
+ WARN_ON(1);
+ spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
+ tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper,
+ link);
+ if (unlikely(tx_pkt != tx_pkt_expected)) {
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
+ irq_flags);
+ WARN_ON(1);
+ }
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ tx_pkt->sys->len--;
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ if (tx_pkt->callback)
+ tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+
+ if (mult.phys_base)
+ dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ unsigned long irq_flags;
+ int result;
+ u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+ dma_addr_t dma_address;
+ u16 len;
+
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto fail_mem_alloc;
+ }
+
+ WARN_ON(desc->len > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the packet does not
+ * cross a 1KB boundary
+ */
+ tx_pkt->bounce = dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+ GFP_KERNEL, &dma_address);
+ if (!tx_pkt->bounce) {
+ dma_address = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary
+ ((u32)dma_address, (u32)dma_address + desc->len - 1,
+ 1024));
+ memcpy(tx_pkt->bounce, desc->pyld, desc->len);
+ }
+
+ if (!dma_address) {
+ IPAERR("failed to DMA wrap\n");
+ goto fail_dma_map;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc->type;
+ tx_pkt->cnt = 1; /* only 1 desc in this "set" */
+
+ tx_pkt->mem.phys_base = dma_address;
+ tx_pkt->mem.base = desc->pyld;
+ tx_pkt->mem.size = desc->len;
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc->callback;
+ tx_pkt->user1 = desc->user1;
+ tx_pkt->user2 = desc->user2;
+
+ /*
+ * Special treatment for immediate commands, where the structure of the
+ * descriptor is different
+ */
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ sps_flags |= SPS_IOVEC_FLAG_IMME;
+ len = desc->opcode;
+ } else {
+ len = desc->len;
+ }
+
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+ }
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+ sps_flags);
+ if (result) {
+ IPAERR("sps_transfer_one failed rc=%d\n", result);
+ goto fail_sps_send;
+ }
+
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ return 0;
+
+fail_sps_send:
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ dma_address);
+fail_dma_map:
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+ return -EFAULT;
+}
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct sps_transfer transfer = { 0 };
+ struct sps_iovec *iovec;
+ unsigned long irq_flags;
+ dma_addr_t dma_addr;
+ int i;
+ int j;
+ int result;
+ int fail_dma_wrap;
+ uint size = num_desc * sizeof(struct sps_iovec);
+
+ for (i = 0; i < num_desc; i++) {
+ fail_dma_wrap = 0;
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+ GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto failure;
+ }
+ /*
+ * first desc of set is "special" as it holds the count and
+ * other info
+ */
+ if (i == 0) {
+ transfer.user = tx_pkt;
+ transfer.iovec =
+ dma_alloc_coherent(NULL, size, &dma_addr, 0);
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ if (!transfer.iovec) {
+ IPAERR("fail alloc DMA mem for sps xfr buff\n");
+ goto failure;
+ }
+
+ tx_pkt->mult.phys_base = dma_addr;
+ tx_pkt->mult.base = transfer.iovec;
+ tx_pkt->mult.size = size;
+ tx_pkt->cnt = num_desc;
+ }
+
+ iovec = &transfer.iovec[i];
+ iovec->flags = 0;
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc[i].type;
+
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ WARN_ON(tx_pkt->mem.size > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the
+ * packet does not cross a 1KB boundary
+ */
+ tx_pkt->bounce =
+ dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool, GFP_KERNEL,
+ &tx_pkt->mem.phys_base);
+ if (!tx_pkt->bounce) {
+ tx_pkt->mem.phys_base = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary(
+ (u32)tx_pkt->mem.phys_base,
+ (u32)tx_pkt->mem.phys_base +
+ tx_pkt->mem.size - 1, 1024));
+ memcpy(tx_pkt->bounce, tx_pkt->mem.base,
+ tx_pkt->mem.size);
+ }
+
+ if (!tx_pkt->mem.phys_base) {
+			IPAERR("failed to DMA wrap\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc[i].callback;
+ tx_pkt->user1 = desc[i].user1;
+ tx_pkt->user2 = desc[i].user2;
+
+ iovec->addr = tx_pkt->mem.phys_base;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ /*
+ * Special treatment for immediate commands, where the structure
+ * of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ iovec->size = desc[i].opcode;
+ iovec->flags |= SPS_IOVEC_FLAG_IMME;
+ } else {
+ iovec->size = desc[i].len;
+ }
+
+ if (i == (num_desc - 1)) {
+ iovec->flags |= (SPS_IOVEC_FLAG_EOT |
+ SPS_IOVEC_FLAG_INT);
+ /* "mark" the last desc */
+ tx_pkt->cnt = 0xFFFF;
+ }
+ }
+
+ result = sps_transfer(sys->ep->ep_hdl, &transfer);
+ if (result) {
+ IPAERR("sps_transfer failed rc=%d\n", result);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+ tx_pkt = transfer.user;
+ for (j = 0; j < i; j++) {
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+	if (i < num_desc && fail_dma_wrap)
+		/* the last (failed) desc was never queued; free its wrapper */
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ if (transfer.iovec_phys)
+ dma_free_coherent(NULL, size, transfer.iovec,
+ transfer.iovec_phys);
+
+ return -EFAULT;
+}
+
+/**
+ * ipa_cmd_ack() - callback function which will be called by the SPS driver
+ * after an immediate command is complete.
+ * @user1: pointer to the descriptor of the transfer
+ * @user2: not used
+ *
+ * Complete the immediate command's completion object, this will release the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_cmd_ack(void *user1, void *user2)
+{
+ struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+	if (WARN_ON(!desc))
+		return;
+ IPADBG("got ack for cmd=%d\n", desc->opcode);
+ complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc: number of descriptors within the descr struct
+ * @descr: descriptor structure
+ *
+ * Function will block until the command gets an ACK from the IPA HW; the
+ * caller needs to free any resources it allocated after the function returns
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+ struct ipa_desc *desc;
+
+ if (num_desc == 1) {
+ init_completion(&descr->xfer_done);
+
+ /* client should not set these */
+ if (descr->callback || descr->user1)
+ WARN_ON(1);
+
+ descr->callback = ipa_cmd_ack;
+ descr->user1 = descr;
+ if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
+ IPAERR("fail to send immediate command\n");
+ return -EFAULT;
+ }
+ wait_for_completion(&descr->xfer_done);
+ } else {
+ desc = &descr[num_desc - 1];
+ init_completion(&desc->xfer_done);
+
+ /* client should not set these */
+ if (desc->callback || desc->user1)
+ WARN_ON(1);
+
+ desc->callback = ipa_cmd_ack;
+ desc->user1 = desc;
+ if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
+ IPAERR("fail to send multiple immediate command set\n");
+ return -EFAULT;
+ }
+ wait_for_completion(&desc->xfer_done);
+ }
+
+ return 0;
+}
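+
+/*
+ * Illustrative usage sketch of ipa_send_cmd() (not part of the driver; the
+ * payload type shown is just one of the immediate commands and the DMA
+ * address is a hypothetical placeholder). It mirrors the pattern used by
+ * __ipa_commit_flt() in ipa_flt.c:
+ *
+ *	struct ipa_desc desc = { 0 };
+ *	struct ipa_ip_v4_filter_init *cmd;
+ *
+ *	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ *	if (!cmd)
+ *		return -ENOMEM;
+ *	cmd->ipv4_rules_addr = tbl_phys_base;
+ *	desc.opcode = IPA_IP_V4_FILTER_INIT;
+ *	desc.pyld = cmd;
+ *	desc.len = sizeof(*cmd);
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	if (ipa_send_cmd(1, &desc))
+ *		rc = -EFAULT;
+ *	kfree(cmd);
+ */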
+
+/**
+ * ipa_tx_notify() - Callback function which will be called by the SPS driver
+ * after a Tx operation is complete. Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ */
+static void ipa_tx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ tx_pkt = notify->data.transfer.user;
+ queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
+ break;
+ default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ * - Disconnect the packet from the system pipe linked list
+ * - Unmap the packet's skb so it is no longer DMA-able
+ * - Free the packet wrapper back to its cache
+ * - Prepare a proper skb
+ * - Call the endpoint's notify function, passing the skb in the parameters
+ * - Replenish the rx cache
+ */
+void ipa_handle_rx_core(void)
+{
+ struct ipa_a5_mux_hdr *mux_hdr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct sk_buff *rx_skb;
+ struct sps_iovec iov;
+ unsigned long irq_flags;
+ u16 pull_len;
+ u16 padding;
+ int ret;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+ struct ipa_ep_context *ep;
+
+ do {
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+
+ /* Break the loop when there are no more packets to receive */
+ if (iov.addr == 0)
+ break;
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ if (list_empty(&sys->head_desc_list))
+ WARN_ON(1);
+ rx_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa_rx_pkt_wrapper, link);
+ if (!rx_pkt)
+ WARN_ON(1);
+ rx_pkt->len = iov.size;
+ sys->len--;
+ list_del(&rx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ IPADBG("--curr_cnt=%d\n", sys->len);
+
+		rx_skb = rx_pkt->skb;
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+
+		/*
+		 * make it look like a real skb, "data" was already set at
+		 * alloc time; rx_pkt->len must be read before the wrapper is
+		 * freed
+		 */
+		rx_skb->tail = rx_skb->data + rx_pkt->len;
+		rx_skb->len = rx_pkt->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+ mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+ IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+ rx_skb->len, ntohs(mux_hdr->interface_id),
+ mux_hdr->src_pipe_index,
+ mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+ IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+ if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
+ IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
+ mux_hdr->src_pipe_index,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+ dev_kfree_skb_any(rx_skb);
+ ipa_replenish_rx_cache();
+ continue;
+ }
+
+ ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
+ pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+ /*
+ * IP packet starts on word boundary
+ * remove the MUX header and any padding and pass the frame to
+ * the client which registered a rx callback on the "src pipe"
+ */
+ padding = ep->cfg.hdr.hdr_len & 0x3;
+ if (padding)
+ pull_len += 4 - padding;
+
+ IPADBG("pulling %d bytes from skb\n", pull_len);
+ skb_pull(rx_skb, pull_len);
+ ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+ ipa_replenish_rx_cache();
+ } while (1);
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(void)
+{
+ int ret;
+ struct ipa_sys_context *sys;
+
+	IPADBG("Enter\n");
+ if (!ipa_ctx->curr_polling_state) {
+ IPAERR("already in intr mode\n");
+ return;
+ }
+
+ sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ return;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ ipa_handle_rx_core();
+ ipa_ctx->curr_polling_state = 0;
+}
+
+/**
+ * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
+ */
+static void ipa_rx_switch_to_poll_mode(void)
+{
+ int ret;
+ struct ipa_ep_context *ep;
+
+	IPADBG("Enter\n");
+ ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+ ret = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ ipa_ctx->curr_polling_state = 1;
+}
+
+/**
+ * ipa_rx_notify() - Callback function which is called by the SPS driver when
+ * a packet is received
+ * @notify: SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (!ipa_ctx->curr_polling_state) {
+ ipa_rx_switch_to_poll_mode();
+ rx_pkt = notify->data.transfer.user;
+ queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+ }
+ break;
+ default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and config EP
+ * @clnt_hdl: [out] client handle
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int sys_idx = -1;
+ int result = -EFAULT;
+ dma_addr_t dma_addr;
+
+ if (sys_in == NULL || clnt_hdl == NULL ||
+ sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ result = -EINVAL;
+ goto fail_bad_param;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail_bad_param;
+ }
+
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
+ IPAERR("EP already allocated.\n");
+ goto fail_bad_param;
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+ ipa_ctx->ep[ipa_ep_idx].valid = 1;
+ ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
+
+ if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_sps_api;
+ }
+
+ /* Default Config */
+ ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
+
+ if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
+ IPAERR("SPS EP allocation failed.\n");
+ goto fail_sps_api;
+ }
+
+ result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->ep[ipa_ep_idx].connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ goto fail_mem_alloc;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
+ ipa_ctx->ep[ipa_ep_idx].connect.destination =
+ SPS_DEV_HANDLE_MEM;
+ ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
+ ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
+ ipa_ctx->a5_pipe_index++;
+ ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
+ ipa_ctx->ep[ipa_ep_idx].connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+ if (ipa_ctx->polling_mode)
+ ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
+ } else {
+ ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
+ ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
+ ipa_ctx->ep[ipa_ep_idx].connect.destination =
+ ipa_ctx->bam_handle;
+ ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
+ ipa_ctx->a5_pipe_index++;
+ ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
+ ipa_ctx->ep[ipa_ep_idx].connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT;
+ if (ipa_ctx->polling_mode)
+ ipa_ctx->ep[ipa_ep_idx].connect.options |=
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ }
+
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
+ dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+ &dma_addr, 0);
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
+ if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
+ IPAERR("fail to get DMA desc memory.\n");
+ goto fail_mem_alloc;
+ }
+
+ ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+ result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->ep[ipa_ep_idx].connect);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto fail_sps_connect;
+ }
+
+ switch (ipa_ep_idx) {
+ case 1:
+ /* fall through */
+ case 2:
+ /* fall through */
+ case 3:
+ sys_idx = ipa_ep_idx;
+ break;
+ case 15:
+ sys_idx = IPA_A5_WLAN_AMPDU_OUT;
+ break;
+ default:
+ IPAERR("Invalid EP index.\n");
+ result = -EFAULT;
+ goto fail_register_event;
+ }
+
+ if (!ipa_ctx->polling_mode) {
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ result =
+ sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("rx register event error %d\n", result);
+ goto fail_register_event;
+ }
+ } else {
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ result =
+ sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("tx register event error %d\n", result);
+ goto fail_register_event;
+ }
+ }
+ }
+
+ return 0;
+
+fail_register_event:
+ sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_connect:
+ dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
+fail_mem_alloc:
+ sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_api:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_bad_param:
+ return result;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
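+
+/*
+ * Illustrative usage sketch (not part of the driver); the client type and
+ * descriptor FIFO size below are hypothetical placeholders:
+ *
+ *	struct ipa_sys_connect_params sys_in = { 0 };
+ *	u32 hdl;
+ *
+ *	sys_in.client = some_ipa_client;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	(fill sys_in.ipa_ep_cfg as required by the client)
+ *	if (ipa_setup_sys_pipe(&sys_in, &hdl))
+ *		return -EFAULT;
+ *	...
+ *	ipa_teardown_sys_pipe(hdl);
+ */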
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
+ dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
+ ipa_ctx->ep[clnt_hdl].connect.desc.base,
+ ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
+ sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+/**
+ * ipa_tx_comp() - Callback function which will call the user supplied callback
+ * function to release the skb, or release it on its own if no callback
+ * function was supplied.
+ * @user1: [in] the skb to release
+ * @user2: [in] the index of the destination endpoint
+ */
+static void ipa_tx_comp(void *user1, void *user2)
+{
+ struct sk_buff *skb = (struct sk_buff *)user1;
+ u32 ep_idx = (u32)user2;
+
+ IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+ if (ipa_ctx->ep[ep_idx].notify)
+ ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)skb);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @meta: [in] TX packet meta-data
+ *
+ * Data-path tx handler, this is used for both the SW data-path which bypasses
+ * most IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only.
+ * If dst is a "valid" CONS type, then the SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then the HW data-path for WLAN AMPDU is used. Anything
+ * else is an error. For errors, the client needs to free the skb as needed.
+ * On success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *meta)
+{
+ struct ipa_desc desc[2];
+ int ipa_ep_idx;
+ struct ipa_ip_packet_init *cmd;
+
+ memset(&desc, 0, 2 * sizeof(struct ipa_desc));
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
+ if (ipa_ep_idx == -1) {
+ IPAERR("dest EP does not exist.\n");
+ goto fail_gen;
+ }
+
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+ IPAERR("dest EP not valid.\n");
+ goto fail_gen;
+ }
+
+ if (IPA_CLIENT_IS_CONS(dst)) {
+ cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_mem_alloc;
+ }
+
+ cmd->destination_pipe_index = ipa_ep_idx;
+ if (meta && meta->mbim_stream_id_valid)
+ cmd->metadata = meta->mbim_stream_id;
+ desc[0].opcode = IPA_IP_PACKET_INIT;
+ desc[0].pyld = cmd;
+ desc[0].len = sizeof(struct ipa_ip_packet_init);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[1].pyld = skb->data;
+ desc[1].len = skb->len;
+ desc[1].type = IPA_DATA_DESC_SKB;
+ desc[1].callback = ipa_tx_comp;
+ desc[1].user1 = skb;
+ desc[1].user2 = (void *)ipa_ep_idx;
+
+ if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send;
+ }
+ } else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
+ desc[0].pyld = skb->data;
+ desc[0].len = skb->len;
+ desc[0].type = IPA_DATA_DESC_SKB;
+ desc[0].callback = ipa_tx_comp;
+ desc[0].user1 = skb;
+ desc[0].user2 = (void *)ipa_ep_idx;
+
+ if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
+ &desc[0])) {
+ IPAERR("fail to send skb\n");
+ goto fail_gen;
+ }
+ } else {
+ IPAERR("%d PROD is not supported.\n", dst);
+ goto fail_gen;
+ }
+
+ return 0;
+
+fail_send:
+ kfree(cmd);
+fail_mem_alloc:
+fail_gen:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
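+
+/*
+ * Illustrative usage sketch (not part of the driver); dst is either a CONS
+ * client type or IPA_CLIENT_A5_WLAN_AMPDU_PROD. On failure the caller still
+ * owns the skb; on success ownership passes to the IPA driver (or to the
+ * notify callback registered for the EP):
+ *
+ *	if (ipa_tx_dp(dst, skb, NULL)) {
+ *		dev_kfree_skb_any(skb);
+ *		return -EFAULT;
+ *	}
+ *	return 0;
+ */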
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+void ipa_handle_rx(struct work_struct *work)
+{
+ ipa_handle_rx_core();
+ ipa_rx_switch_to_intr_mode();
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ * - Allocate a buffer in the cache
+ * - Initialize the packet's link
+ * - Initialize the packet's work struct
+ * - Allocate the packet's socket buffer (skb)
+ * - Fill the packet's skb with data
+ * - Make the packet DMA-able
+ * - Add the packet to the system pipe linked list
+ * - Initiate a SPS transfer so that the SPS driver will use this packet later.
+ */
+void ipa_replenish_rx_cache(void)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached;
+ unsigned long irq_flags;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ rx_len_cached = sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	/* true RX data path is not currently exercised so lower the ceiling */
+ while (rx_len_cached < (IPA_RX_POOL_CEIL >> 3)) {
+ rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+ GFP_KERNEL);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+
+ rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+ if (rx_pkt->skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
+ rx_pkt->dma_address = dma_map_single(NULL, ptr,
+ IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->dma_address, ptr);
+ goto fail_dma_mapping;
+ }
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
+ IPA_RX_SKB_SIZE, rx_pkt,
+ SPS_IOVEC_FLAG_INT);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+
+ IPADBG("++curr_cnt=%d\n", sys->len);
+ }
+
+ return;
+
+fail_sps_transfer:
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_del(&rx_pkt->link);
+ --sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+fail_dma_mapping:
+ dev_kfree_skb_any(rx_pkt->skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+ return;
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ */
+void ipa_cleanup_rx(void)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_rx_pkt_wrapper *r;
+ unsigned long irq_flags;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->head_desc_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_pkt->skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
new file mode 100644
index 0000000..81f3a80
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE (4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3)
+#define IPA_FLT_BIT_MASK (0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller wants to know the size of
+ * the rule as seen by HW, so a scratch buffer is used instead of a valid
+ * caller supplied buffer. With this scheme the rule is generated twice: once
+ * to learn the size, using the scratch buffer, and a second time to write the
+ * rule into the actual caller supplied buffer, which is of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_flt_entry *entry, u8 *buf)
+{
+ struct ipa_flt_rule_hw_hdr *hdr;
+ const struct ipa_flt_rule *rule =
+ (const struct ipa_flt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+ u8 *start;
+
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ if (buf == NULL)
+ buf = tmp;
+
+ start = buf;
+ hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+ hdr->u.hdr.action = entry->rule.action;
+ hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+ hdr->u.hdr.rsvd = 0;
+ buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule %x\n", en_rule);
+
+ hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR("hw_len differs b/w passes passed=%x calc=%x\n",
+ entry->hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
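+
+/*
+ * Illustrative sketch of the two-pass scheme described above (not part of
+ * the driver). The first call sizes the rule and fills entry->hw_len, the
+ * second writes it and verifies that the two passes agree:
+ *
+ *	if (ipa_generate_flt_hw_rule(ip, entry, NULL))
+ *		return -EPERM;
+ *	(reserve entry->hw_len bytes at "body" in the table buffer)
+ *	if (ipa_generate_flt_hw_rule(ip, entry, body))
+ *		return -EPERM;
+ *	body += entry->hw_len;
+ */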
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] header block size
+ *
+ * Returns: the size of the table on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ u32 total_sz = 0;
+ u32 rule_set_sz;
+ int i;
+
+ *hdr_sz = 0;
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("pipe %d len %d\n", i, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz +
+ IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+ }
+
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
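+
+/*
+ * Worked example of the rounding above: with IPA_FLT_ENTRY_MEMORY_ALLIGNMENT
+ * equal to 0x3, (total_sz + 0x3) & ~0x3 rounds total_sz up to the next
+ * multiple of 4 bytes, e.g. 9 becomes 12 while 8 stays 8.
+ */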
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the filtering table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ u32 hdr_top = 0;
+ int i;
+ u32 hdr_sz;
+ u32 offset;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ struct ipa_mem_buffer flt_tbl_mem;
+ u8 *ftbl_membody;
+
+ mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+ if (mem->size == 0) {
+ IPAERR("flt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the flt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* write a dummy header to move cursor */
+ hdr = ipa_write_32(hdr_top, hdr);
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ hdr_top |= IPA_FLT_BIT_MASK;
+ if (!tbl->in_sys) {
+ offset = body - base;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(NULL, flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+ hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody = ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ /* pipe "i" is at bit "i+1" */
+ hdr_top |= (1 << (i + 1));
+ if (!tbl->in_sys) {
+ offset = body - base;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("ofst is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ body)) {
+ IPAERR("fail gen FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(NULL, flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail alloc DMA buff size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+ hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("fail gen FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody =
+ ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+ }
+
+ /* now write the hdr_top */
+ ipa_write_32(hdr_top, base);
+
+ return 0;
+proc_err:
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+
+ return -EPERM;
+}
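+
+/*
+ * Sketch of the table image built above (reconstructed from the code, for
+ * reference only):
+ *
+ *	word 0:    hdr_top bitmask; bit 0 is set if the global rule-set is
+ *	           non-empty, bit i+1 if pipe i's rule-set is non-empty
+ *	hdr block: one word per non-empty rule-set, holding either an offset
+ *	           from the table base with IPA_FLT_BIT_MASK set in the LSB
+ *	           (local rule-set) or the physical address of a separately
+ *	           allocated system-memory table (in_sys rule-set)
+ *	body:      the local rule-sets, each terminated by a zero word and
+ *	           padded to a word boundary
+ */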
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ int i;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+ i, ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0,
+ sizeof(tbl->curr_mem));
+ }
+ }
+ }
+}
+
+static int __ipa_commit_flt(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_filter_init *v4;
+ struct ipa_ip_v6_filter_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = IPA_RAM_V4_FLT_SIZE;
+ size = sizeof(struct ipa_ip_v4_filter_init);
+ } else {
+ avail = IPA_RAM_V6_FLT_SIZE;
+ size = sizeof(struct ipa_ip_v6_filter_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_flt_hw_tbl(ip, mem)) {
+ IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_filter_init *)cmd;
+ desc.opcode = IPA_IP_V4_FILTER_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_RAM_V4_FLT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_filter_init *)cmd;
+ desc.opcode = IPA_IP_V6_FILTER_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_RAM_V6_FLT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_flt_tbls(ip);
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_entry *entry;
+ struct ipa_tree_node *node;
+
+ if (!rule->rt_tbl_hdl) {
+ IPAERR("flt rule does not point to valid RT tbl\n");
+ goto error;
+ }
+
+ if (ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rule->rt_tbl_hdl) == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if (((struct ipa_rt_tbl *)rule->rt_tbl_hdl)->cookie != IPA_COOKIE) {
+ IPAERR("flt rule cookie is invalid\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc FLT rule object\n");
+ goto mem_alloc_fail;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->rule = *rule;
+ entry->cookie = IPA_COOKIE;
+ entry->rt_tbl = (struct ipa_rt_tbl *)rule->rt_tbl_hdl;
+ entry->tbl = tbl;
+ if (add_rear)
+ list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_flt_rule_list);
+ tbl->rule_cnt++;
+ entry->rt_tbl->ref_cnt++;
+ *rule_hdl = (u32)entry;
+ IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+ node->hdl = *rule_hdl;
+ if (ipa_insert(&ipa_ctx->flt_rule_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+
+ return 0;
+
+mem_alloc_fail:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+
+ return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+ struct ipa_flt_entry *entry = (struct ipa_flt_entry *)rule_hdl;
+ struct ipa_tree_node *node;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, rule_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+
+ return -EPERM;
+ }
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ IPADBG("add global flt rule ip=%d\n", ip);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+ int ipa_ep_idx;
+
+ if (ip >= IPA_IP_MAX || rule == NULL || rule_hdl == NULL ||
+ ep >= IPA_CLIENT_MAX) {
+ IPAERR("bad parms\n");
+
+ return -EINVAL;
+ }
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
+ if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
+ ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+ IPAERR("bad parms\n");
+
+ return -EINVAL;
+ }
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+ IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ int i;
+ int result;
+
+ if (rules == NULL || rules->num_rules == 0 ||
+ rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (rules->global)
+ result = __ipa_add_global_flt_rule(rules->ip,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ else
+ result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ if (result) {
+ IPAERR("failed to add flt rule %d\n", i);
+ rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (__ipa_commit_flt(rules->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
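+
+/*
+ * Illustrative usage sketch (not part of the driver), assuming the rules
+ * array is a trailing flexible member as the usage above suggests;
+ * rt_tbl_hdl is a placeholder for a handle obtained from the routing API:
+ *
+ *	struct ipa_ioc_add_flt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->ip = IPA_IP_v4;
+ *	req->global = 1;
+ *	req->num_rules = 1;
+ *	req->commit = 1;
+ *	req->rules[0].at_rear = 1;
+ *	req->rules[0].rule.rt_tbl_hdl = rt_tbl_hdl;
+ *	(fill req->rules[0].rule.attrib and .action as needed)
+ *	if (!ipa_add_flt_rule(req) && !req->rules[0].status)
+ *		hdl = req->rules[0].flt_rule_hdl;
+ *	kfree(req);
+ */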
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of handles of the filtering rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+ hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+	if (hdls->commit)
+		if (__ipa_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ int result;
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (__ipa_commit_flt(ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ struct ipa_flt_entry *next;
+ struct ipa_tree_node *node;
+ int i;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset flt ip=%d\n", ip);
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+ node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, (u32)entry);
+ if (node == NULL)
+ WARN_ON(1);
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+ link) {
+ node = ipa_search(&ipa_ctx->flt_rule_hdl_tree,
+ (u32)entry);
+ if (node == NULL)
+ WARN_ON(1);
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
new file mode 100644
index 0000000..4b9a500
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 32, 64 };
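+
+/*
+ * Example of the bin scheme (for reference): a header with hdr_len <= 8
+ * lands in BIN0, <= 16 in BIN1, <= 32 in BIN2 and <= 64 in BIN3, so e.g. a
+ * 14-byte Ethernet II header occupies a 16-byte BIN1 slot. Anything larger
+ * than 64 bytes is rejected by __ipa_add_hdr().
+ */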
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem: [out] buffer to put the header table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+ struct ipa_hdr_entry *entry;
+
+ mem->size = ipa_ctx->hdr_tbl.end;
+
+ if (mem->size == 0) {
+ IPAERR("hdr tbl empty\n");
+ return -EPERM;
+ }
+ IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ memset(mem->base, 0, mem->size);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+ entry->offset_entry->offset);
+ memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+ entry->hdr_len);
+ }
+
+ return 0;
+}
+
+/*
+ * __ipa_commit_hdr() - commit the header table to IPA HW
+ * This function needs to be called with ipa_ctx->lock held.
+ */
+static int __ipa_commit_hdr(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ struct ipa_hdr_init_local *cmd;
+ u16 len;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+	/* the immediate command param size is the same for local and system */
+ len = sizeof(struct ipa_hdr_init_local);
+
+ /*
+ * we can use init_local ptr for init_system due to layout of the
+ * struct
+ */
+ cmd = kmalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_hdr_hw_tbl(mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+ IPA_RAM_HDR_SIZE);
+ goto fail_hw_tbl_gen;
+ }
+
+ cmd->hdr_table_addr = mem->phys_base;
+ if (ipa_ctx->hdr_tbl_lcl) {
+ cmd->size_hdr_table = mem->size;
+ cmd->hdr_addr = IPA_RAM_HDR_OFST;
+ desc.opcode = IPA_HDR_INIT_LOCAL;
+ } else {
+ desc.opcode = IPA_HDR_INIT_SYSTEM;
+ }
+ desc.pyld = cmd;
+ desc.len = sizeof(struct ipa_hdr_init_local);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ } else {
+ if (ipa_ctx->hdr_mem.phys_base) {
+ dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+ ipa_ctx->hdr_mem.base,
+ ipa_ctx->hdr_mem.phys_base);
+ }
+ ipa_ctx->hdr_mem = *mem;
+ }
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_offset_entry *offset;
+ struct ipa_tree_node *node;
+ u32 bin;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+ if (hdr->hdr_len == 0) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc hdr object\n");
+ goto hdr_alloc_fail;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+ entry->hdr_len = hdr->hdr_len;
+ strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+ entry->is_partial = hdr->is_partial;
+ entry->cookie = IPA_COOKIE;
+
+ if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+ bin = IPA_HDR_BIN0;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+ bin = IPA_HDR_BIN1;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+ bin = IPA_HDR_BIN2;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+ bin = IPA_HDR_BIN3;
+ else {
+ IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ goto bad_hdr_len;
+ }
+
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc hdr offset object\n");
+ goto ofst_alloc_fail;
+ }
+ INIT_LIST_HEAD(&offset->link);
+		/*
+		 * first expansion of this bin: the bin and offset assigned
+		 * here are fixed for the lifetime of the table
+		 */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa_hdr_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ list_add(&entry->link, &htbl->head_hdr_entry_list);
+ htbl->hdr_cnt++;
+ IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
+ htbl->hdr_cnt, offset->offset);
+
+ hdr->hdr_hdl = (u32) entry;
+ node->hdl = hdr->hdr_hdl;
+ if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+
+ return 0;
+
+ofst_alloc_fail:
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
+bad_hdr_len:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+hdr_alloc_fail:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return -EPERM;
+}
+
+static int __ipa_del_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+ struct ipa_tree_node *node;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+ if (!entry || (entry->cookie != IPA_COOKIE) || (entry->ref_cnt != 0)) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+ htbl->hdr_cnt, entry->offset_entry->offset);
+
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ list_del(&entry->link);
+ htbl->hdr_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs: [inout] set of headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdrs == NULL || hdrs->num_hdrs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdrs->num_hdrs; i++) {
+ if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ IPAERR("failed to add hdr %d\n", i);
+ hdrs->hdr[i].status = -1;
+ } else {
+ hdrs->hdr[i].status = 0;
+ }
+ }
+
+ if (hdrs->commit) {
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
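+
+/*
+ * Illustrative usage sketch (not part of the driver), assuming the hdr
+ * array is a trailing flexible member as the usage above suggests; the
+ * header name and bytes are hypothetical placeholders:
+ *
+ *	struct ipa_ioc_add_hdr *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->hdr[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->num_hdrs = 1;
+ *	req->commit = 1;
+ *	strlcpy(req->hdr[0].name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+ *	memcpy(req->hdr[0].hdr, hdr_bytes, hdr_len);
+ *	req->hdr[0].hdr_len = hdr_len;
+ *	if (!ipa_add_hdr(req) && !req->hdr[0].status)
+ *		hdl = req->hdr[0].hdr_hdl;
+ *	kfree(req);
+ */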
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
+ * to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_dump_hdr() - prints all the headers in the header table in SW
+ *
+ * Note: Should not be called from atomic context
+ */
+void ipa_dump_hdr(void)
+{
+ struct ipa_hdr_entry *entry;
+
+ IPADBG("START\n");
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
+ entry->offset_entry->offset,
+ entry->offset_entry->bin);
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+ int result = -EFAULT;
+
+ /*
+ * issue a commit on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa_commit_rt(IPA_IP_v4))
+ return -EPERM;
+ if (ipa_commit_rt(IPA_IP_v6))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_entry *next;
+ struct ipa_hdr_offset_entry *off_entry;
+ struct ipa_hdr_offset_entry *off_next;
+ struct ipa_tree_node *node;
+ int i;
+
+ /*
+ * issue a reset on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa_reset_rt(IPA_IP_v4))
+ IPAERR("fail to reset v4 rt\n");
+	if (ipa_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset hdr\n");
+ list_for_each_entry_safe(entry, next,
+ &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+ /* do not remove the default exception header */
+ if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX))
+ continue;
+
+ node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
+ if (node == NULL)
+ WARN_ON(1);
+ list_del(&entry->link);
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ }
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_offset_list[i],
+ link) {
+
+ /*
+ * do not remove the default exception header which is
+ * at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
+
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ }
+ /* there is one header of size 8 */
+ ipa_ctx->hdr_tbl.end = 8;
+ ipa_ctx->hdr_tbl.hdr_cnt = 1;
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+ struct ipa_hdr_entry *entry;
+
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return its handle if it exists;
+ * if the lookup succeeds, the header entry's ref cnt is increased
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -1;
+
+ if (lookup == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(lookup->name);
+ if (entry) {
+ entry->ref_cnt++;
+ lookup->hdl = (uint32_t) entry;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
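+
+/*
+ * Illustrative get/put pairing (not part of the driver); a successful
+ * ipa_get_hdr() takes a reference that must be dropped with ipa_put_hdr():
+ *
+ *	struct ipa_ioc_get_hdr lookup = { 0 };
+ *
+ *	strlcpy(lookup.name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+ *	if (ipa_get_hdr(&lookup))
+ *		return -EFAULT;
+ *	...
+ *	ipa_put_hdr(lookup.hdl);
+ */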
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+ struct ipa_tree_node *node;
+ int result = -EFAULT;
+
+ if (entry == NULL || entry->cookie != IPA_COOKIE ||
+ entry->ref_cnt == 0) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0) {
+ if (__ipa_del_hdr(hdr_hdl)) {
+ IPAERR("fail to del hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ /* commit for put */
+ if (__ipa_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
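+
+/*
+ * Illustrative only (not part of the driver): clients are expected to
+ * pair ipa_get_hdr() with ipa_put_hdr() around any use of a header
+ * handle, along the lines of:
+ *
+ *	struct ipa_ioc_get_hdr lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	strlcpy(lookup.name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa_get_hdr(&lookup)) {
+ *		... use lookup.hdl ...
+ *		ipa_put_hdr(lookup.hdl);
+ *	}
+ */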
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * Look up the specified header resource and return a copy of it (along
+ * with its attributes) if it exists; typically used for partial headers
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -EFAULT;
+
+ if (copy == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(copy->name);
+ if (entry) {
+ memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+ copy->hdr_len = entry->hdr_len;
+ copy->is_partial = entry->is_partial;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
diff --git a/drivers/platform/msm/ipa/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_hw_defs.h
new file mode 100644
index 0000000..3131a84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hw_defs.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT (1)
+#define IPA_PPP_FRM_INIT (2)
+#define IPA_IP_V4_FILTER_INIT (3)
+#define IPA_IP_V6_FILTER_INIT (4)
+#define IPA_IP_V4_NAT_INIT (5)
+#define IPA_IP_V6_NAT_INIT (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL (9)
+#define IPA_HDR_INIT_SYSTEM (10)
+#define IPA_DECIPH_SETUP (11)
+#define IPA_INSERT_NAT_RULE (12)
+#define IPA_DELETE_NAT_RULE (13)
+#define IPA_NAT_DMA (14)
+#define IPA_IP_PACKET_TAG (15)
+#define IPA_IP_PACKET_INIT (16)
+
+#define IPA_INTERFACE_ID_EXCEPTION (0)
+#define IPA_INTERFACE_ID_A2_WWAN (0x10)
+#define IPA_INTERFACE_ID_HSUSB_RMNET1 (0x21)
+#define IPA_INTERFACE_ID_HSUSB_RMNET2 (0x22)
+#define IPA_INTERFACE_ID_HSUSB_RMNET3 (0x23)
+#define IPA_INTERFACE_ID_HSIC_WLAN_WAN (0x31)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN1 (0x32)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN2 (0x33)
+#define IPA_INTERFACE_ID_HSIC_RMNET1 (0x41)
+#define IPA_INTERFACE_ID_HSIC_RMNET2 (0x42)
+#define IPA_INTERFACE_ID_HSIC_RMNET3 (0x43)
+#define IPA_INTERFACE_ID_HSIC_RMNET4 (0x44)
+#define IPA_INTERFACE_ID_HSIC_RMNET5 (0x45)
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @rsvd: reserved
+ */
+struct ipa_flt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 action:5;
+ u32 rt_tbl_idx:5;
+ u32 rsvd:6;
+ } hdr;
+ } u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ */
+struct ipa_rt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 pipe_dest_idx:5;
+ u32 system:1;
+ u32 hdr_offset:10;
+ } hdr;
+ } u;
+};
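+
+/*
+ * Illustrative encoding, mirroring ipa_generate_rt_hw_rule() in
+ * ipa_rt.c: a rule routing to pipe 3 with its header at byte offset 16
+ * in a local header table would be built roughly as
+ *
+ *	rule_hdr.u.hdr.pipe_dest_idx = 3;
+ *	rule_hdr.u.hdr.system = 0;
+ *	rule_hdr.u.hdr.hdr_offset = 16 >> 2;	(offset is in 4-byte words)
+ */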
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_addr: address of header table
+ * @size_hdr_table: size of the above
+ * @hdr_addr: header address
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+ u64 hdr_table_addr:32;
+ u64 size_hdr_table:12;
+ u64 hdr_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: address of header table
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+ u64 hdr_table_addr:32;
+ u64 rsvd:32;
+};
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(0)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(1)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(2)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(5)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+ u16 interface_id;
+ u8 src_pipe_index;
+ u8 flags;
+ u32 metadata;
+};
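+
+/*
+ * Since the MUX header arrives big-endian while the A5 runs little
+ * endian, a parser would convert the multi-byte fields, e.g.
+ * (illustrative sketch):
+ *
+ *	struct ipa_a5_mux_hdr *mux = (struct ipa_a5_mux_hdr *)skb->data;
+ *	u16 if_id = ntohs(mux->interface_id);
+ *	u32 meta = ntohl(mux->metadata);
+ */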
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+ u64 destination_pipe_index:5;
+ u64 rsvd1:3;
+ u64 metadata:32;
+ u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: NAT table index
+ * @ipv4_rules_addr_type: ipv4 rules address memory type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion rules address memory type
+ * @index_table_addr_type: index table address memory type
+ * @index_table_expansion_addr_type: index expansion table address memory type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+ u64 ipv4_rules_addr:32;
+ u64 ipv4_expansion_rules_addr:32;
+ u64 index_table_addr:32;
+ u64 index_table_expansion_addr:32;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
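+
+/*
+ * See ipa_nat_init_cmd() in ipa_nat.c for this payload being populated
+ * for both the system memory and the shared (local) memory cases.
+ */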
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
new file mode 100644
index 0000000..63ef5fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+
+#define DRV_NAME "ipa"
+#define IPA_COOKIE 0xfacefeed
+
+#define IPA_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ (0x800)
+
+#ifdef IPA_DEBUG
+#define IPADBG(fmt, args...) \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#else
+#define IPADBG(fmt, args...)
+#endif
+
+#define IPAERR(fmt, args...) \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPA_TOS_EQ BIT(0)
+#define IPA_PROTOCOL_EQ BIT(1)
+#define IPA_OFFSET_MEQ32_0 BIT(2)
+#define IPA_OFFSET_MEQ32_1 BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0 BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1 BIT(5)
+#define IPA_IHL_OFFSET_EQ_16 BIT(6)
+#define IPA_IHL_OFFSET_EQ_32 BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0 BIT(8)
+#define IPA_OFFSET_MEQ128_0 BIT(9)
+#define IPA_OFFSET_MEQ128_1 BIT(10)
+#define IPA_TC_EQ BIT(11)
+#define IPA_FL_EQ BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1 BIT(13)
+#define IPA_METADATA_COMPARE BIT(14)
+#define IPA_IPV4_IS_FRAG BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN_MAX 4
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_SKB_SIZE 2048
+
+#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
+
+#define IPA_CLIENT_IS_PROD(x) ((x) >= IPA_CLIENT_PROD && (x) < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) ((x) >= IPA_CLIENT_CONS && (x) < IPA_CLIENT_MAX)
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE (128)
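+
+/*
+ * IPA_HW_TABLE_ALIGNMENT() rounds an offset up to the next 128 byte
+ * boundary, e.g. 0 -> 0, 1 -> 128, 130 -> 256. IPA_SETFIELD() composes
+ * register values from the BMSK/SHFT pairs in ipa_reg.h, e.g.
+ * (illustrative):
+ *
+ *	val |= IPA_SETFIELD(pipe, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ *			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+ */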
+
+/**
+ * enum ipa_sys_pipe - 5 A5-IPA pipes
+ *
+ * 5 A5-IPA pipes (all system mode)
+ */
+enum ipa_sys_pipe {
+ IPA_A5_UNUSED,
+ IPA_A5_CMD,
+ IPA_A5_LAN_WAN_OUT,
+ IPA_A5_LAN_WAN_IN,
+ IPA_A5_WLAN_AMPDU_OUT,
+ IPA_A5_SYS_MAX
+};
+
+/**
+ * enum ipa_operating_mode - IPA operating mode
+ *
+ * IPA operating mode
+ */
+enum ipa_operating_mode {
+ IPA_MODE_USB_DONGLE,
+ IPA_MODE_MSM,
+ IPA_MODE_EXT_APPS,
+ IPA_MODE_MOBILE_AP_WAN,
+ IPA_MODE_MOBILE_AP_WLAN,
+ IPA_MODE_MOBILE_AP_ETH,
+ IPA_MODE_MAX
+};
+
+/**
+ * enum ipa_bridge_dir - direction of the bridge from air interface perspective
+ *
+ * IPA bridge direction
+ */
+enum ipa_bridge_dir {
+ IPA_DL,
+ IPA_UL,
+ IPA_DIR_MAX
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+ void *base;
+ dma_addr_t phys_base;
+ u32 size;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+ struct list_head link;
+ struct ipa_flt_rule rule;
+ u32 cookie;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_rt_tbl *rt_tbl;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ */
+struct ipa_rt_tbl {
+ struct list_head link;
+ struct list_head head_rt_rule_list;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u32 idx;
+ u32 rule_cnt;
+ u32 ref_cnt;
+ struct ipa_rt_tbl_set *set;
+ u32 cookie;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @is_partial: flag indicating if header table entry is partial
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of header table entry
+ */
+struct ipa_hdr_entry {
+ struct list_head link;
+ u8 hdr[IPA_HDR_MAX_SIZE];
+ u32 hdr_len;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u8 is_partial;
+ struct ipa_hdr_offset_entry *offset_entry;
+ u32 cookie;
+ u32 ref_cnt;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa_hdr_tbl {
+ struct list_head head_hdr_entry_list;
+ struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+ u32 hdr_cnt;
+ u32 end;
+};
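+
+/*
+ * Offsets are managed per size bin: offsets in use sit on
+ * head_offset_list[bin] and recycled ones on head_free_offset_list[bin],
+ * so a freed slot can be reused by the next header that fits the same
+ * bin (see ipa_reset_hdr()).
+ */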
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+ struct list_head head_flt_rule_list;
+ u32 rule_cnt;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @hw_len: entry's size as seen by HW
+ */
+struct ipa_rt_entry {
+ struct list_head link;
+ struct ipa_rt_rule rule;
+ u32 cookie;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_hdr_entry *hdr;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+ struct list_head head_rt_tbl_list;
+ u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_tree_node - handle database entry
+ * @node: RB node
+ * @hdl: handle
+ */
+struct ipa_tree_node {
+ struct rb_node node;
+ u32 hdl;
+};
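+
+/*
+ * Handles handed out to clients are the kernel addresses of the
+ * underlying objects cast to u32 (see e.g. ipa_get_hdr()); this tree
+ * lets the driver validate a handle before dereferencing it.
+ */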
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information
+ * @notify: user provided CB for EP events notification
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ */
+struct ipa_ep_context {
+ int valid;
+ enum ipa_client_type client;
+ struct sps_pipe *ep_hdl;
+ struct ipa_ep_cfg cfg;
+ u32 dst_pipe_index;
+ u32 rt_tbl_idx;
+ struct sps_connect connect;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ bool desc_fifo_in_pipe_mem;
+ bool data_fifo_in_pipe_mem;
+ u32 desc_fifo_pipe_mem_ofst;
+ u32 data_fifo_pipe_mem_ofst;
+ bool desc_fifo_client_allocated;
+ bool data_fifo_client_allocated;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ * @wait_desc_list: used to hold completed Tx packets
+ *
+ * IPA context specific to the system-BAM pipes, a.k.a. LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+ struct list_head head_desc_list;
+ u32 len;
+ spinlock_t spinlock;
+ struct sps_register_event event;
+ struct ipa_ep_context *ep;
+ struct list_head wait_desc_list;
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but no CD
+ */
+enum ipa_desc_type {
+ IPA_DATA_DESC,
+ IPA_DATA_DESC_SKB,
+ IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: info for the skb or immediate command param
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ *	 >1 and <0xFFFF for first of a "multiple" transfer,
+ *	 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ */
+struct ipa_tx_pkt_wrapper {
+ enum ipa_desc_type type;
+ struct ipa_mem_buffer mem;
+ struct work_struct work;
+ struct list_head link;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct ipa_sys_context *sys;
+ struct ipa_mem_buffer mult;
+ u16 cnt;
+ void *bounce;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+ enum ipa_desc_type type;
+ void *pyld;
+ u16 len;
+ u16 opcode;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @work: work struct for current Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+ struct sk_buff *skb;
+ dma_addr_t dma_address;
+ struct work_struct work;
+ struct list_head link;
+ u16 len;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ */
+struct ipa_nat_mem {
+ struct class *class;
+ struct device *dev;
+ struct cdev cdev;
+ dev_t dev_num;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+ bool is_mapped;
+ bool is_sys_mem;
+ bool is_dev_init;
+ struct mutex lock;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @tree_node_cache: tree nodes cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @sys: IPA sys context for system-bam pipes
+ * @rx_wq: Rx packets work queue
+ * @tx_wq: Tx packets work queue
+ * @smem_sz: shared memory size
+ * @hdr_hdl_tree: header handles tree
+ * @rt_rule_hdl_tree: routing rule handles tree
+ * @rt_tbl_hdl_tree: routing table handles tree
+ * @flt_rule_hdl_tree: filtering rule handles tree
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @polling_mode: 1 - pure polling mode; 0 - interrupt+polling mode
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @curr_polling_state: current polling state
+ * @poll_work: polling work
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_mem: header memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @pipe_mem_pool: pipe memory pool
+ * @one_kb_no_straddle_pool: one kb no straddle pool
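+ * @ipa_active_clients: number of currently active IPA clients
+ * @clnt_hdl_cmd: client handle of the command pipe
+ * @clnt_hdl_data_in: client handle of the data in pipe
+ * @clnt_hdl_data_out: client handle of the data out pipe
+ * @a5_pipe_index: A5 pipe index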
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ u32 bam_handle;
+ struct ipa_ep_context ep[IPA_NUM_PIPES];
+ struct ipa_flt_tbl flt_tbl[IPA_NUM_PIPES][IPA_IP_MAX];
+ enum ipa_operating_mode mode;
+ void __iomem *mmio;
+ u32 ipa_wrapper_base;
+ struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+ struct ipa_hdr_tbl hdr_tbl;
+ struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+ struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+ struct kmem_cache *flt_rule_cache;
+ struct kmem_cache *rt_rule_cache;
+ struct kmem_cache *hdr_cache;
+ struct kmem_cache *hdr_offset_cache;
+ struct kmem_cache *rt_tbl_cache;
+ struct kmem_cache *tx_pkt_wrapper_cache;
+ struct kmem_cache *rx_pkt_wrapper_cache;
+ struct kmem_cache *tree_node_cache;
+ unsigned long rt_idx_bitmap[IPA_IP_MAX];
+ struct mutex lock;
+ struct ipa_sys_context sys[IPA_A5_SYS_MAX];
+ struct workqueue_struct *rx_wq;
+ struct workqueue_struct *tx_wq;
+ u16 smem_sz;
+ struct rb_root hdr_hdl_tree;
+ struct rb_root rt_rule_hdl_tree;
+ struct rb_root rt_tbl_hdl_tree;
+ struct rb_root flt_rule_hdl_tree;
+ struct ipa_nat_mem nat_mem;
+ u32 excp_hdr_hdl;
+ u32 dflt_v4_rt_rule_hdl;
+ u32 dflt_v6_rt_rule_hdl;
+ bool polling_mode;
+ uint aggregation_type;
+ uint aggregation_byte_limit;
+ uint aggregation_time_limit;
+ uint curr_polling_state;
+ struct delayed_work poll_work;
+ bool hdr_tbl_lcl;
+ struct ipa_mem_buffer hdr_mem;
+ bool ip4_rt_tbl_lcl;
+ bool ip6_rt_tbl_lcl;
+ bool ip4_flt_tbl_lcl;
+ bool ip6_flt_tbl_lcl;
+ struct ipa_mem_buffer empty_rt_tbl_mem;
+ struct gen_pool *pipe_mem_pool;
+ struct dma_pool *one_kb_no_straddle_pool;
+ atomic_t ipa_active_clients;
+ u32 clnt_hdl_cmd;
+ u32 clnt_hdl_data_in;
+ u32 clnt_hdl_data_out;
+ u8 a5_pipe_index;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ */
+struct ipa_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+ IPA_SPS_PIPE_MEM = 0,
+ IPA_PRIVATE_MEM = 1,
+ IPA_SYSTEM_MEM = 2,
+};
+
+/**
+ * enum a2_mux_pipe_direction - IPA-A2 pipe direction
+ */
+enum a2_mux_pipe_direction {
+ A2_TO_IPA = 0,
+ IPA_TO_A2 = 1
+};
+
+/**
+ * struct a2_mux_pipe_connection - A2 MUX pipe connection
+ * @src_phy_addr: source physical address
+ * @src_pipe_index: source pipe index
+ * @dst_phy_addr: destination physical address
+ * @dst_pipe_index: destination pipe index
+ * @mem_type: pipe memory type
+ * @data_fifo_base_offset: data FIFO base offset
+ * @data_fifo_size: data FIFO size
+ * @desc_fifo_base_offset: descriptors FIFO base offset
+ * @desc_fifo_size: descriptors FIFO size
+ */
+struct a2_mux_pipe_connection {
+ int src_phy_addr;
+ int src_pipe_index;
+ int dst_phy_addr;
+ int dst_pipe_index;
+ enum ipa_pipe_mem_type mem_type;
+ int data_fifo_base_offset;
+ int data_fifo_size;
+ int desc_fifo_base_offset;
+ int desc_fifo_size;
+};
+
+extern struct ipa_context *ipa_ctx;
+
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pipe_connect);
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle);
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc);
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc);
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client);
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ u8 **buf,
+ u16 *en_rule);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+void ipa_dump(void);
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem);
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+/*
+ * The functions below read from/write to IPA local memory, a.k.a. device
+ * memory. The order of the arguments is deliberately different from that
+ * of the ipa_write* functions, which operate on system memory.
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram);
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram);
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram);
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count);
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count);
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count);
+
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data);
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl);
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+ ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+void ipa_replenish_rx_cache(void);
+void ipa_cleanup_rx(void);
+int ipa_cfg_filter(u32 disable);
+void ipa_write_done(struct work_struct *work);
+void ipa_handle_rx(struct work_struct *work);
+void ipa_handle_rx_core(void);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+ u32 val = ioread32(base + offset);
+ IPADBG("0x%x(va) read reg 0x%x r_val 0x%x.\n",
+ (u32)base, offset, val);
+ return val;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+ IPADBG("0x%x(va) write reg 0x%x w_val 0x%x.\n",
+ (u32)base, offset, val);
+}
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+int ipa_bridge_setup(enum ipa_bridge_dir dir);
+int ipa_bridge_teardown(enum ipa_bridge_dir dir);
+
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_nat.c b/drivers/platform/msm/ipa/ipa_nat.c
new file mode 100644
index 0000000..c13c53a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_nat.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY 0
+#define IPA_NAT_SHARED_MEMORY 1
+
+static int ipa_nat_vma_fault_remap(
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ IPADBG("\n");
+ vmf->page = NULL;
+
+ return VM_FAULT_SIGBUS;
+}
+
+/* VM operations for the NAT remap area */
+static const struct vm_operations_struct ipa_nat_remap_vm_ops = {
+ .fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_nat_mem *nat_ctx;
+ IPADBG("\n");
+ nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+ filp->private_data = nat_ctx;
+ IPADBG("return\n");
+ return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+ unsigned long phys_addr;
+ int result;
+
+ mutex_lock(&nat_ctx->lock);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ if (nat_ctx->is_mapped) {
+ IPAERR("mapping already exists, only 1 supported\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ IPADBG("map sz=0x%x\n", nat_ctx->size);
+ result =
+ dma_mmap_coherent(
+ NULL, vma,
+ nat_ctx->vaddr, nat_ctx->dma_handle,
+ nat_ctx->size);
+
+ if (result) {
+ IPAERR("unable to map memory. Err:%d\n", result);
+ goto bail;
+ }
+ } else {
+ IPADBG("Mapping shared(local) memory\n");
+ IPADBG("map sz=0x%lx\n", vsize);
+ phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+ IPA_SRAM_DIRECT_ACCESS_n_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+ if (remap_pfn_range(
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ IPAERR("remap failed\n");
+ result = -EAGAIN;
+ goto bail;
+ }
+
+ }
+ nat_ctx->is_mapped = true;
+ vma->vm_ops = &ipa_nat_remap_vm_ops;
+ IPADBG("return\n");
+ result = 0;
+bail:
+ mutex_unlock(&nat_ctx->lock);
+ return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_nat_open,
+ .mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result;
+
+ IPADBG("passed memory size %d\n", mem->size);
+
+ mutex_lock(&nat_ctx->lock);
+ if (mem->size <= 0 || !strlen(mem->dev_name)
+ || nat_ctx->is_dev_init == true) {
+		IPADBG("Invalid parameters or device already initialized\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ nat_ctx->is_sys_mem = true;
+ nat_ctx->vaddr =
+ dma_alloc_coherent(NULL, mem->size, &nat_ctx->dma_handle,
+ gfp_flags);
+ if (nat_ctx->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ nat_ctx->size = mem->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ nat_ctx->is_sys_mem = false;
+ }
+
+ nat_ctx->class = class_create(THIS_MODULE, mem->dev_name);
+ if (IS_ERR(nat_ctx->class)) {
+ IPAERR("unable to create the class\n");
+ result = -ENODEV;
+ goto vaddr_alloc_fail;
+ }
+ result = alloc_chrdev_region(&nat_ctx->dev_num,
+ 0,
+ 1,
+ mem->dev_name);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto alloc_chrdev_region_fail;
+ }
+
+ nat_ctx->dev =
+ device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+ mem->dev_name);
+
+ if (IS_ERR(nat_ctx->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ result = -ENODEV;
+ goto device_create_fail;
+ }
+
+ cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+ nat_ctx->cdev.owner = THIS_MODULE;
+ nat_ctx->cdev.ops = &ipa_nat_fops;
+
+ result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ if (result) {
+ IPAERR("cdev_add err=%d\n", -result);
+ goto cdev_add_fail;
+ }
+ nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT driver initialized successfully\n");
+ result = 0;
+ goto bail;
+
+cdev_add_fail:
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+ class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+ if (nat_ctx->vaddr) {
+ IPADBG("Releasing system memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->vaddr = NULL;
+ nat_ctx->dma_handle = 0;
+ nat_ctx->size = 0;
+ }
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
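+
+/*
+ * Illustrative user space flow (assumed, not defined by this driver):
+ * once allocate_nat_device() has created /dev/<dev_name>, a NAT client
+ * would typically map the table with something like
+ *
+ *	fd = open("/dev/ipaNatTable", O_RDWR);	(name chosen by caller)
+ *	tbl = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * which lands in ipa_nat_mmap() above; for system memory only a single
+ * mapping is supported.
+ */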
+
+/* IOCTL function handlers */
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ int result;
+
+ IPADBG("\n");
+ if (init->tbl_index < 0 || init->table_entries <= 0) {
+		IPADBG("Invalid table index or number of entries\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ if (ipa_ctx->nat_mem.vaddr) {
+ IPADBG("using system memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+ cmd->ipv4_expansion_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+ IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+ cmd->index_table_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd->index_table_expansion_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ init->ipv4_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->ipv4_expansion_rules_addr =
+ init->expn_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_addr = init->index_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_expansion_addr =
+ init->index_expn_offset + IPA_RAM_NAT_OFST;
+ }
+ cmd->table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd->table_index);
+ cmd->size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+ cmd->size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+ cmd->public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ IPADBG("posting v4 init command\n");
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_cmd;
+ }
+
+ IPADBG("return\n");
+ result = 0;
+free_cmd:
+ kfree(cmd);
+bail:
+ return result;
+}
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ struct ipa_nat_dma *cmd = NULL;
+ struct ipa_desc *desc = NULL;
+ u16 size = 0, cnt = 0;
+ int ret = 0;
+
+ IPADBG("\n");
+ if (dma->entries <= 0) {
+ IPADBG("Invalid number of commands\n");
+ ret = -EPERM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_desc) * dma->entries;
+ desc = kmalloc(size, GFP_KERNEL);
+ if (desc == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_nat_dma) * dma->entries;
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ cmd[cnt].table_index = dma->dma[cnt].table_index;
+ cmd[cnt].base_addr = dma->dma[cnt].base_addr;
+ cmd[cnt].offset = dma->dma[cnt].offset;
+ cmd[cnt].data = dma->dma[cnt].data;
+ desc[cnt].type = IPA_IMM_CMD_DESC;
+ desc[cnt].opcode = IPA_NAT_DMA;
+ desc[cnt].callback = NULL;
+		desc[cnt].user1 = NULL;
+		desc[cnt].user2 = NULL;
+		desc[cnt].len = sizeof(struct ipa_nat_dma);
+		desc[cnt].pyld = (void *)&cmd[cnt];
+ }
+ IPADBG("posting dma command with entries %d\n", dma->entries);
+ ret = ipa_send_cmd(dma->entries, desc);
+ if (ret == -EPERM)
+ IPAERR("Fail to send immediate command\n");
+
+bail:
+ kfree(cmd);
+ kfree(desc);
+
+ return ret;
+}
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx: [in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+ IPADBG("\n");
+ mutex_lock(&nat_ctx->lock);
+
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("freeing the dma memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->size = 0;
+ nat_ctx->vaddr = NULL;
+ }
+ nat_ctx->is_mapped = false;
+ nat_ctx->is_sys_mem = false;
+ cdev_del(&nat_ctx->cdev);
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+ class_destroy(nat_ctx->class);
+ nat_ctx->is_dev_init = false;
+
+ mutex_unlock(&nat_ctx->lock);
+ IPADBG("return\n");
+ return;
+}
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ u8 mem_type = IPA_NAT_SHARED_MEMORY;
+ u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+ int result;
+
+ IPADBG("\n");
+ if (del->table_index < 0 || del->public_ip_addr == 0) {
+ IPADBG("Bad Parameter\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ cmd->table_index = del->table_index;
+ cmd->ipv4_rules_addr = base_addr;
+ cmd->ipv4_rules_addr_type = mem_type;
+ cmd->ipv4_expansion_rules_addr = base_addr;
+ cmd->ipv4_expansion_rules_addr_type = mem_type;
+ cmd->index_table_addr = base_addr;
+ cmd->index_table_addr_type = mem_type;
+ cmd->index_table_expansion_addr = base_addr;
+ cmd->index_table_expansion_addr_type = mem_type;
+ cmd->size_base_tables = 0;
+ cmd->size_expansion_tables = 0;
+ cmd->public_ip_addr = del->public_ip_addr;
+
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_mem;
+ }
+
+ ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+ IPADBG("return\n");
+ result = 0;
+free_mem:
+ kfree(cmd);
+bail:
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
new file mode 100644
index 0000000..000718b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM (not all 8K is available
+ * for SW use) the first 2K are set aside for NAT
+ */
+
+#define IPA_RAM_NAT_OFST 0
+#define IPA_RAM_NAT_SIZE 2048
+#define IPA_RAM_HDR_OFST 2048
+#define IPA_RAM_HDR_SIZE 256
+#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
+#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_RT_OFST (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
+#define IPA_RAM_V4_RT_SIZE 1024
+#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
+#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_RT_OFST (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
+#define IPA_RAM_V6_RT_SIZE 1024
+#define IPA_RAM_END_OFST (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
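+
+/*
+ * Resulting layout (byte offsets):
+ *	    0 - 2047	NAT
+ *	 2048 - 2303	HDR
+ *	 2304 - 3327	V4 FLT
+ *	 3328 - 4351	V4 RT
+ *	 4352 - 5375	V6 FLT
+ *	 5376 - 6399	V6 RT
+ */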
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
new file mode 100644
index 0000000..61913b6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+/*
+ * IPA's core specific registers
+ */
+
+#define IPA_REG_BASE_OFST 0x00020000
+
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+#define IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define IPA_COMP_HW_VERSION_MAJOR_BMSK 0xff000000
+#define IPA_COMP_HW_VERSION_MAJOR_SHFT 0x18
+#define IPA_COMP_HW_VERSION_MINOR_BMSK 0xff0000
+#define IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define IPA_COMP_HW_VERSION_STEP_SHFT 0x0
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_VERSION_RMSK 0xffffffff
+#define IPA_VERSION_IPA_R_REV_BMSK 0xff000000
+#define IPA_VERSION_IPA_R_REV_SHFT 0x18
+#define IPA_VERSION_IPA_Q_REV_BMSK 0xff0000
+#define IPA_VERSION_IPA_Q_REV_SHFT 0x10
+#define IPA_VERSION_IPA_P_REV_BMSK 0xff00
+#define IPA_VERSION_IPA_P_REV_SHFT 0x8
+#define IPA_VERSION_IPA_ECO_REV_BMSK 0xff
+#define IPA_VERSION_IPA_ECO_REV_SHFT 0x0
+
+#define IPA_COMP_CFG_OFST 0x00000038
+#define IPA_COMP_CFG_RMSK 0x1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0x0
+
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+#define IPA_COMP_SW_RESET_RMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_BMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_SHFT 0x0
+
+#define IPA_CLKON_CFG_OFST 0x00000040
+#define IPA_CLKON_CFG_RMSK 0xf
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_TX_BMSK 0x4
+#define IPA_CLKON_CFG_CGC_OPEN_TX_SHFT 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_SHFT 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+#define IPA_HEAD_OF_LINE_BLOCK_EN_RMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_BMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_OFST 0x00000048
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_RMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_BMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_SHFT 0x0
+
+#define IPA_ROUTE_OFST 0x0000004c
+#define IPA_ROUTE_RMSK 0x1ffff
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+
+#define IPA_FILTER_OFST 0x00000050
+#define IPA_FILTER_RMSK 0x1
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_MASTER_PRIORITY_OFST 0x00000054
+#define IPA_MASTER_PRIORITY_RMSK 0xffffffff
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_BMSK 0xc0000000
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_SHFT 0x1e
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_BMSK 0x30000000
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_SHFT 0x1c
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_BMSK 0xc000000
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_SHFT 0x1a
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_BMSK 0x3000000
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_SHFT 0x18
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_BMSK 0xc00000
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_SHFT 0x16
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_BMSK 0x300000
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_SHFT 0x14
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_BMSK 0xc0000
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_SHFT 0x12
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_BMSK 0x30000
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_SHFT 0x10
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_BMSK 0xc000
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_SHFT 0xe
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_BMSK 0x3000
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_SHFT 0xc
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_BMSK 0xc00
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_SHFT 0xa
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_BMSK 0x300
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_SHFT 0x8
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_BMSK 0xc0
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_SHFT 0x6
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_BMSK 0x30
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_SHFT 0x4
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_BMSK 0xc
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_SHFT 0x2
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_BMSK 0x3
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_SHFT 0x0
+
+#define IPA_SHARED_MEM_SIZE_OFST 0x00000058
+#define IPA_SHARED_MEM_SIZE_RMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+#define IPA_NAT_TIMER_OFST 0x0000005c
+#define IPA_NAT_TIMER_RMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_BMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_SHFT 0x0
+
+#define IPA_NAT_TIMER_RESET_OFST 0x00000060
+#define IPA_NAT_TIMER_RESET_RMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_BMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_SHFT 0x0
+
+#define IPA_ENDP_INIT_NAT_n_OFST(n) (0x00000080 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_MAXn 19
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_n_OFST(n) (0x000000e0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_n_RMSK 0x7ffffff
+#define IPA_ENDP_INIT_HDR_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_OFST(n) (0x00000140 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_n_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_n_MAXn 19
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x7c
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x2
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_OFST(n) (0x000001a0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_n_RMSK 0x7fff
+#define IPA_ENDP_INIT_AGGR_n_MAXn 19
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_ROUTE_n_OFST(n) (0x00000200 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_n_RMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_MAXn 19
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_1_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_SPARE_REG_2_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_SRAM_DIRECT_ACCESS_n_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_n_RMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_MAXn 2047
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_BMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_SHFT 0x0
+
+#endif /* __IPA_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
new file mode 100644
index 0000000..c69e1fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_RT_TABLE_WORD_SIZE (4)
+#define IPA_RT_INDEX_BITMAP_SIZE (32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
+#define IPA_RT_BIT_MASK (0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
+
+/**
+ * ipa_generate_rt_hw_rule() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *	   the size of the rule as seen by HW, so a scratch buffer is
+ *	   used instead. With this scheme the rule is generated twice:
+ *	   once to learn its size using the scratch buffer, and a second
+ *	   time to write it into the caller-supplied buffer of the
+ *	   required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ struct ipa_rt_rule_hw_hdr *rule_hdr;
+ const struct ipa_rt_rule *rule =
+ (const struct ipa_rt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+ u8 *start;
+ int pipe_idx;
+
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ if (buf == NULL)
+ buf = tmp;
+
+ start = buf;
+ rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+ pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst);
+ if (pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+ rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+ if (entry->hdr)
+ rule_hdr->u.hdr.hdr_offset =
+ entry->hdr->offset_entry->offset >> 2;
+ else
+ rule_hdr->u.hdr.hdr_offset = 0;
+
+ buf += sizeof(struct ipa_rt_rule_hw_hdr);
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule 0x%x\n", en_rule);
+
+ rule_hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR(
+ "hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+ entry->hw_len,
+ (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
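+
+/*
+ * Illustrative sketch of the two-pass scheme described above (not part
+ * of the driver flow; `entry` is assumed to be a valid routing entry):
+ *
+ *	pass 1 - buf == NULL computes entry->hw_len via a scratch buffer:
+ *		ipa_generate_rt_hw_rule(IPA_IP_v4, entry, NULL);
+ *	pass 2 - write into a caller buffer of at least hw_len bytes:
+ *		ipa_generate_rt_hw_rule(IPA_IP_v4, entry, buf);
+ *		buf += entry->hw_len;
+ */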
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] size in bytes of the table header
+ * @max_rt_idx: [out] highest routing table index in use
+ *
+ * Returns: the size in bytes of the HW routing table on success,
+ * negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the size of the hdr
+ * of the routing tbl
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+ int *max_rt_idx)
+{
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ u32 total_sz = 0;
+ u32 tbl_sz;
+ u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+ int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+ int i;
+
+ *hdr_sz = 0;
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (bitmap & IPA_RT_BIT_MASK)
+ highest_bit_set = i;
+ bitmap >>= 1;
+ }
+
+ *max_rt_idx = highest_bit_set;
+ if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("no rt tbls present\n");
+ total_sz = IPA_RT_TABLE_WORD_SIZE;
+ *hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+ return total_sz;
+ }
+
+ *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ tbl_sz = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW RT rule size\n");
+ return -EPERM;
+ }
+ tbl_sz += entry->hw_len;
+ }
+
+ if (tbl_sz)
+ tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+ if (tbl->in_sys)
+ continue;
+
+ if (tbl_sz) {
+ /* add the terminator */
+ total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+ /* every rule-set should start at word boundary */
+ total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
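+
+/*
+ * Worked example (illustrative): if rt_idx_bitmap is 0x13 (bits 0, 1
+ * and 4 set), the highest bit set is 4, so
+ * hdr_sz = (4 + 1) * IPA_RT_TABLE_WORD_SIZE = 20 bytes. Each local
+ * table then adds its rules plus one terminator word, rounded up to
+ * the next word boundary.
+ */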
+
+/**
+ * ipa_generate_rt_hw_tbl() - generates the routing hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the routing table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ u32 hdr_sz;
+ u32 offset;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ struct ipa_mem_buffer rt_tbl_mem;
+ u8 *rt_tbl_mem_body;
+ int max_rt_idx;
+ int i;
+
+ mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+ if (mem->size == 0) {
+ IPAERR("rt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* setup all indices to point to the empty sys rt tbl */
+ for (i = 0; i <= max_rt_idx; i++)
+ ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+ hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ offset = body - base;
+ if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n", offset);
+ goto proc_err;
+ }
+
+ if (!tbl->in_sys) {
+ /* convert offset to words from bytes */
+ offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_RT_BIT_MASK;
+
+ /* update the hdr at the right index */
+ ipa_write_32(offset, hdr +
+ (tbl->idx * IPA_RT_TABLE_WORD_SIZE));
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_RT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the RT tbl */
+ rt_tbl_mem.size = tbl->sz;
+ rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, rt_tbl_mem.size,
+ &rt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!rt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ rt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(rt_tbl_mem.phys_base &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+ rt_tbl_mem_body = rt_tbl_mem.base;
+ memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+ /* update the hdr at the right index */
+ ipa_write_32(rt_tbl_mem.phys_base,
+ hdr + (tbl->idx *
+ IPA_RT_TABLE_WORD_SIZE));
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry,
+ rt_tbl_mem_body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ WARN_ON(1);
+ goto rt_table_mem_alloc_failed;
+ }
+ rt_tbl_mem_body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = rt_tbl_mem;
+ }
+ }
+
+ return 0;
+
+rt_table_mem_alloc_failed:
+ dma_free_coherent(NULL, rt_tbl_mem.size,
+ rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+ return -EPERM;
+}
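+
+/*
+ * Resulting DMA buffer layout (illustrative):
+ *
+ *	base ->           one header word per table index; word i holds
+ *	                  (byte offset of a local rule-set | 0x1), the
+ *	                  physical address of a system table, or the
+ *	                  empty system table for unused indices
+ *	base + hdr_sz ->  rule-sets of the local tables, each followed
+ *	                  by a zero terminator word and padded to a
+ *	                  word boundary
+ */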
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *next;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+ }
+
+ set = &ipa_ctx->reap_rt_tbl_set[ip];
+ list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+ list_del(&tbl->link);
+ WARN_ON(tbl->prev_mem.phys_base != 0);
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+ ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ }
+ }
+}
+
+static int __ipa_commit_rt(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_routing_init *v4;
+ struct ipa_ip_v6_routing_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = IPA_RAM_V4_RT_SIZE;
+ size = sizeof(struct ipa_ip_v4_routing_init);
+ } else {
+ avail = IPA_RAM_V6_RT_SIZE;
+ size = sizeof(struct ipa_ip_v6_routing_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_rt_hw_tbl(ip, mem)) {
+ IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_routing_init *)cmd;
+ desc.opcode = IPA_IP_V4_ROUTING_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_routing_init *)cmd;
+ desc.opcode = IPA_IP_V6_ROUTING_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_rt_tbls(ip);
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+ return -EPERM;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ * whose name is given as parameter
+ * @ip: [in] the ip address family type of the wanted routing table
+ * @name: [in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as parameter, or NULL
+ * if it doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+ if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+ const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_tree_node *node;
+ int i;
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto node_alloc_fail;
+ }
+
+ if (ip >= IPA_IP_MAX || name == NULL) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ /* check if this table exists */
+ entry = __ipa_find_rt_tbl(ip, name);
+ if (!entry) {
+ entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT tbl object\n");
+ goto error;
+ }
+ /* find a routing tbl index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+ entry->idx = i;
+ set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+ break;
+ }
+ }
+ if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+ IPAERR("not free RT tbl indices left\n");
+ goto fail_rt_idx_alloc;
+ }
+
+ INIT_LIST_HEAD(&entry->head_rt_rule_list);
+ INIT_LIST_HEAD(&entry->link);
+ strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+ entry->set = set;
+ entry->cookie = IPA_COOKIE;
+ entry->in_sys = (ip == IPA_IP_v4) ?
+ !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+ set->tbl_cnt++;
+ list_add(&entry->link, &set->head_rt_tbl_list);
+
+ IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+ set->tbl_cnt, ip);
+
+ node->hdl = (u32)entry;
+ if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ }
+
+ return entry;
+
+fail_rt_idx_alloc:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+node_alloc_fail:
+ return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ if (!entry->in_sys) {
+ list_del(&entry->link);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+ } else {
+ list_move(&entry->link,
+ &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ }
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_tree_node *node;
+
+ if (rule->hdr_hdl &&
+ ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
+ ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ tbl = __ipa_add_rt_tbl(ip, name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ goto fail_rt_tbl_sanity;
+ }
+ /*
+ * do not allow any rules to be added at end of the "default" routing
+ * tables
+ */
+ if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
+ (tbl->rule_cnt > 0) && (at_rear != 0)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+ tbl->rule_cnt, at_rear);
+ goto fail_rt_tbl_sanity;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT rule object\n");
+ goto fail_rt_tbl_sanity;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->cookie = IPA_COOKIE;
+ entry->rule = *rule;
+ entry->tbl = tbl;
+ entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
+ if (at_rear)
+ list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_rt_rule_list);
+ tbl->rule_cnt++;
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+ *rule_hdl = (u32)entry;
+
+ node->hdl = *rule_hdl;
+ if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ goto ipa_insert_failed;
+ }
+
+ return 0;
+
+ipa_insert_failed:
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+fail_rt_tbl_sanity:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (__ipa_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
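+
+/*
+ * Illustrative call sequence for the API above (a sketch, not taken
+ * from driver code; IPA_CLIENT_USB_CONS and the table name are
+ * assumptions):
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	strlcpy(req->rt_tbl_name, "my_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->num_rules = 1;
+ *	req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+ *	req->rules[0].at_rear = 1;
+ *	if (ipa_add_rt_rule(req) || req->rules[0].status)
+ *		IPAERR("add rt rule failed\n");
+ */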
+
+static int __ipa_del_rt_rule(u32 rule_hdl)
+{
+ struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
+ struct ipa_tree_node *node;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+ entry->tbl->rule_cnt);
+ if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry->tbl))
+ IPAERR("fail to del RT tbl\n");
+ }
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules from SW and optionally
+ * commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int i;
+ int ret;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del rt rule %i\n", i);
+ hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (__ipa_commit_rt(hdls->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+ /*
+ * issue a commit on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_commit_flt(ip))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (__ipa_commit_rt(ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *tbl_next;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_entry *rule;
+ struct ipa_rt_entry *rule_next;
+ struct ipa_tree_node *node;
+ struct ipa_rt_tbl_set *rset;
+
+ /*
+ * issue a reset on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_reset_flt(ip))
+ IPAERR("fail to reset flt ip=%d\n", ip);
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ rset = &ipa_ctx->reap_rt_tbl_set[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset rt ip=%d\n", ip);
+ list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ list_for_each_entry_safe(rule, rule_next,
+ &tbl->head_rt_rule_list, link) {
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
+ (u32)rule);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /*
+ * for the "default" routing tbl, remove all but the
+ * last rule
+ */
+ if (tbl->idx == 0 && tbl->rule_cnt == 1)
+ continue;
+
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ rule->hdr->ref_cnt--;
+ rule->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /* do not remove the "default" routing tbl which has index 0 */
+ if (tbl->idx != 0) {
+ if (!tbl->in_sys) {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ } else {
+ list_move(&tbl->link, &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ }
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return its
+ * handle if it exists; on a successful lookup the table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ struct ipa_rt_tbl *entry;
+ int result = -EFAULT;
+
+ if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
+ if (entry && entry->cookie == IPA_COOKIE) {
+ entry->ref_cnt++;
+ lookup->hdl = (uint32_t)entry;
+
+ /* commit for get */
+ if (__ipa_commit_rt(lookup->ip))
+ IPAERR("fail to commit RT tbl\n");
+
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE) ||
+ entry->ref_cnt == 0) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ mutex_lock(&ipa_ctx->lock);
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry))
+ IPAERR("fail to del RT tbl\n");
+ /* commit for put */
+ if (__ipa_commit_rt(ip))
+ IPAERR("fail to commit RT tbl\n");
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
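+
+/*
+ * Typical get/put pairing (illustrative sketch; the table name is an
+ * assumption):
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	lookup.ip = IPA_IP_v4;
+ *	strlcpy(lookup.name, "my_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa_get_rt_tbl(&lookup)) {
+ *		use lookup.hdl, then release it:
+ *		ipa_put_rt_tbl(lookup.hdl);
+ *	}
+ */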
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
new file mode 100644
index 0000000..d5d5566
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h> /* gen_pool_alloc() */
+#include <linux/io.h>
+#include "ipa_i.h"
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1, -1 };
+
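+/*
+ * rows are indexed by enum ipa_operating_mode, columns by enum
+ * ipa_client_type; -1 marks a client that has no pipe in that mode
+ */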
+static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
+ { -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+ { -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+ { 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+};
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST,
+ IPA_SETFIELD(route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK) |
+ IPA_SETFIELD(route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+
+ return 0;
+}
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_FILTER_OFST,
+ IPA_SETFIELD(!disable,
+ IPA_FILTER_FILTER_EN_SHFT,
+ IPA_FILTER_FILTER_EN_BMSK));
+ return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+ u32 ipa_version = 0;
+
+ /* do soft reset of IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+ /* enable IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+ /* Read IPA version and make sure we have access to the registers */
+ ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+ if (ipa_version == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @mode: IPA operating mode
+ * @client: client type
+ *
+ * Return value: pipe index for the given mode/client pair, or -1 if
+ * the client has no pipe in that mode
+ */
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client)
+{
+ return ep_mapping[mode][client];
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: dest advanced past the bytes written
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: dest advanced past the bytes written
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+ *dest++ = (u8)((hw) & 0xFF);
+ *dest++ = (u8)((hw >> 8) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: dest advanced past the bytes written
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+ *dest++ = (b) & 0xFF;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: dest advanced to the next 32 bit boundary
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+ int i = (u32)dest & 0x3;
+ int j;
+
+ if (i)
+ for (j = 0; j < (4 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
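+
+/*
+ * Worked example (illustrative): starting from a 32-bit aligned dest,
+ *
+ *	dest = ipa_write_8(tos, dest);	advances dest by one byte
+ *	dest = ipa_pad_to_32(dest);	writes 3 zero bytes so dest is
+ *					32-bit aligned again
+ *
+ * ipa_write_32() stores the least significant byte first, so
+ * 0x11223344 becomes the byte sequence 44 33 22 11.
+ */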
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+
+ if (ip == IPA_IP_v4) {
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+ IPA_FLT_FLOW_LABEL) {
+ IPAERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_TOS_EQ;
+ *buf = ipa_write_8(attrib->u.v4.tos, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 12 => offset of src ip in v4 header */
+ *buf = ipa_write_8(12, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 16 => offset of dst ip in v4 header */
+ *buf = ipa_write_8(16, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v4 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v4 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IPV4_IS_FRAG;
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else if (ip == IPA_IP_v6) {
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL ||
+ attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ IPAERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v6 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v6 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 8 => offset of src ip in v6 header */
+ *buf = ipa_write_8(8, *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 24 => offset of dst ip in v6 header */
+ *buf = ipa_write_8(24, *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_FLT_TC;
+ *buf = ipa_write_8(attrib->u.v6.tc, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_FLT_FLOW_LABEL;
+ /* FIXME FL is only 20 bits */
+ *buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else {
+ IPAERR("unsupported ip %d\n", ip);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ *buf = ipa_write_8(0, *buf); /* offset */
+ *buf = ipa_write_32(0, *buf); /* mask */
+ *buf = ipa_write_32(0, *buf); /* val */
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ return 0;
+}
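+
+/*
+ * Illustrative sketch: matching on an IPv4 source subnet consumes one
+ * of the two meq32 equations (values are examples; byte-order handling
+ * is glossed over):
+ *
+ *	struct ipa_rule_attrib attrib = { 0 };
+ *	u16 en_rule = 0;
+ *
+ *	attrib.attrib_mask = IPA_FLT_SRC_ADDR;
+ *	attrib.u.v4.src_addr = 0x0a000000;	10.0.0.0
+ *	attrib.u.v4.src_addr_mask = 0xff000000;	a /8 subnet
+ *	ipa_generate_hw_rule(IPA_IP_v4, &attrib, &buf, &en_rule);
+ */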
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a
+ * one-shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+ result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
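+
+/*
+ * Illustrative sketch (IPA_CLIENT_USB_CONS is an assumption; clnt_hdl
+ * is the handle handed to the client at connect time): a producer EP
+ * put into DMA mode towards a consumer pipe:
+ *
+ *	struct ipa_ep_cfg cfg = { 0 };
+ *
+ *	cfg.mode.mode = IPA_DMA;
+ *	cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ *	if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *		IPAERR("EP config failed\n");
+ */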
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+ /* clnt_hdl is used as pipe_index */
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ u32 val;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
+ ipa_ctx->ep[clnt_hdl].dst_pipe_index = ipa_get_ep_mapping(ipa_ctx->mode,
+ ipa_ep_cfg->dst);
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
+ IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * if DMA mode was configured previously for this EP, return with
+ * success
+ */
+ if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+ IPADBG("DMA mode for EP %d\n", clnt_hdl);
+ return 0;
+ }
+
+ if (ipa_ep_cfg->rt_tbl_hdl)
+ IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+ /* always use the "default" routing tables whose indices are 0 */
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+ int i;
+ u32 *cur = (u32 *)base;
+ u8 *byt;
+ IPADBG("START phys=%x\n", phy_base);
+ for (i = 0; i < size / 4; i++) {
+ byt = (u8 *)(cur + i);
+ IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
+ byt[0], byt[1], byt[2], byt[3]);
+ }
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_dump() - dumps part of driver data structures for debug purposes
+ */
+void ipa_dump(void)
+{
+ struct ipa_mem_buffer hdr_mem = { 0 };
+ struct ipa_mem_buffer rt_mem = { 0 };
+ struct ipa_mem_buffer flt_mem = { 0 };
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_generate_hdr_hw_tbl(&hdr_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
+ IPAERR("fail\n");
+ IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base, rt_mem.phys_base,
+ flt_mem.phys_base);
+ IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
+ (u32)rt_mem.base, (u32)flt_mem.base);
+ IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
+ flt_mem.size);
+ IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+ IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
+ IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);
+ if (hdr_mem.phys_base)
+ dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
+ hdr_mem.phys_base);
+ if (rt_mem.phys_base)
+ dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
+ rt_mem.phys_base);
+ if (flt_mem.phys_base)
+ dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
+ flt_mem.phys_base);
+ mutex_unlock(&ipa_ctx->lock);
+}
+
+/*
+ * TODO: add swap if needed, for now assume LE is ok for device memory
+ * even though IPA registers are assumed to be BE
+ */
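+
+/*
+ * The 0x4000 added by all the accessors below is the base of the SRAM
+ * direct-access window (IPA_SRAM_DIRECT_ACCESS_n_OFST in ipa_reg.h),
+ * so ofst_ipa_sram is an offset into IPA SRAM.
+ */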
+/**
+ * ipa_write_dev_8() - writes 8 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram)
+{
+ iowrite8(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_16() - writes 16 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ *
+ */
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram)
+{
+ iowrite16(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_32() - writes 32 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram)
+{
+ iowrite32(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_8() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram)
+{
+ return ioread8((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_16() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram)
+{
+ return ioread16((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_32() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram)
+{
+ return ioread32((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_8rep() - writes a buffer of 8 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer of values to write
+ * @count: number of values to write
+ */
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf, unsigned long count)
+{
+ iowrite8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_write_dev_16rep() - writes a buffer of 16 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer of values to write
+ * @count: number of values to write
+ */
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count)
+{
+ iowrite16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ buf, count);
+}
+
+/**
+ * ipa_write_dev_32rep() - writes a buffer of 32 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer of values to write
+ * @count: number of values to write
+ */
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count)
+{
+ iowrite32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ buf, count);
+}
+
+/**
+ * ipa_read_dev_8rep() - reads a buffer of 8 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of values to read
+ */
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_read_dev_16rep() - reads a buffer of 16 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of values to read
+ */
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_read_dev_32rep() - reads a buffer of 32 bit values
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read into
+ * @count: number of values to read
+ */
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_memset_dev() - memset IO
+ * @ofst_ipa_sram: address to set
+ * @value: value
+ * @count: number of bytes to set
+ */
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count)
+{
+ memset_io((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), value,
+ count);
+}
+
+/**
+ * ipa_memcpy_from_dev() - copy memory from device
+ * @dest: buffer to copy to
+ * @ofst_ipa_sram: address
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count)
+{
+ memcpy_fromio(dest, (void *)((u32)ipa_ctx->mmio + 0x4000 +
+ ofst_ipa_sram), count);
+}
+
+/**
+ * ipa_memcpy_to_dev() - copy memory to device
+ * @ofst_ipa_sram: address
+ * @source: buffer to copy from
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count)
+{
+ memcpy_toio((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ source, count);
+}
+
+/**
+ * ipa_defrag() - handle de-frag for bridging type of cases
+ * @skb: skb
+ *
+ * Return value:
+ * 0: skb is not a fragment, or reassembly has completed
+ * -EINPROGRESS: the fragment was queued and reassembly is not done yet
+ */
+int ipa_defrag(struct sk_buff *skb)
+{
+ /*
+ * Reassemble IP fragments. TODO: need to setup network_header to
+ * point to start of IP header
+ */
+ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+ if (ip_defrag(skb, IP_DEFRAG_CONNTRACK_IN))
+ return -EINPROGRESS;
+ }
+
+ /* skb was not a fragment or is now fully reassembled, pass it on */
+ return 0;
+}
+
+/**
+ * ipa_search() - search for handle in RB tree
+ * @root: tree root
+ * @hdl: handle
+ *
+ * Return value: tree node corresponding to the handle, or NULL if it
+ * is not in the tree
+ */
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct ipa_tree_node *data = container_of(node,
+ struct ipa_tree_node, node);
+
+ if (hdl < data->hdl)
+ node = node->rb_left;
+ else if (hdl > data->hdl)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+/**
+ * ipa_insert() - insert new node to RB tree
+ * @root: tree root
+ * @data: new data to insert
+ *
+ * Return value:
+ * 0: success
+ * -EPERM: tree already contains the node with provided handle
+ */
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct ipa_tree_node *this = container_of(*new,
+ struct ipa_tree_node, node);
+
+ parent = *new;
+ if (data->hdl < this->hdl)
+ new = &((*new)->rb_left);
+ else if (data->hdl > this->hdl)
+ new = &((*new)->rb_right);
+ else
+ return -EPERM;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return 0;
+}
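+
+/*
+ * Illustrative pairing of the two helpers above (`cache` stands for an
+ * assumed kmem_cache of struct ipa_tree_node):
+ *
+ *	node = kmem_cache_zalloc(cache, GFP_KERNEL);
+ *	node->hdl = (u32)entry;
+ *	if (ipa_insert(&root, node))
+ *		handle already present, free node
+ *	...
+ *	node = ipa_search(&root, (u32)entry);
+ *	if (node)
+ *		rb_erase(&node->node, &root);
+ */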
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+ int res;
+ u32 aligned_start_ofst;
+ u32 aligned_size;
+ struct gen_pool *pool;
+
+ if (!size) {
+ IPAERR("no IPA pipe mem alloted\n");
+ goto fail;
+ }
+
+ aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+ aligned_size = size - (aligned_start_ofst - start_ofst);
+
+ IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+ start_ofst, aligned_start_ofst, size, aligned_size);
+
+ /* allocation order of 8 i.e. 128 bytes, global pool */
+ pool = gen_pool_create(8, -1);
+ if (!pool) {
+ IPAERR("Failed to create a new memory pool.\n");
+ goto fail;
+ }
+
+ res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+ if (res) {
+ IPAERR("Failed to add memory to IPA pipe pool\n");
+ goto err_pool_add;
+ }
+
+ ipa_ctx->pipe_mem_pool = pool;
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(pool);
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+ u32 vaddr;
+ int res = -1;
+
+ if (!ipa_ctx->pipe_mem_pool || !size) {
+ IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+ ipa_ctx->pipe_mem_pool);
+ return res;
+ }
+
+ vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+ if (vaddr) {
+ *ofst = vaddr;
+ res = 0;
+ IPADBG("size=%u ofst=%u\n", size, vaddr);
+ } else {
+ IPAERR("size=%u failed\n", size);
+ }
+
+ return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+ IPADBG("size=%u ofst=%u\n", size, ofst);
+ if (ipa_ctx->pipe_mem_pool && size)
+ gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+ return 0;
+}
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode, e.g. straight MBIM or QCNCM
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST,
+ ((mode & IPA_AGGREGATION_MODE_MSK) <<
+ IPA_AGGREGATION_MODE_SHFT) |
+ (reg_val & IPA_AGGREGATION_MODE_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ u32 reg_val;
+
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa_set_qcncm_ndp_sig\n");
+ return -EINVAL;
+ }
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
+ IPA_AGGREGATION_QCNCM_SIG0_SHFT |
+ (sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
+ sig[2] | (reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
+ IPA_AGGREGATION_SINGLE_NDP_MSK) |
+ (reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+ u32 next_start;
+ u32 prev_end;
+
+ IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
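+ /*
+ * next_start is start rounded up to the boundary; prev_end is the
+ * last boundary-aligned address below end. The interval straddles a
+ * boundary when the two meet on a common aligned address.
+ */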
+ next_start = (start + (boundary - 1)) & ~(boundary - 1);
+ prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+ while (next_start < prev_end)
+ next_start += boundary;
+
+ if (next_start == prev_end)
+ return 1;
+ else
+ return 0;
+}
+
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
new file mode 100644
index 0000000..3c7f5ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "a2_service.h"
+#include "ipa_i.h"
+
+static struct rmnet_bridge_cb_type {
+ u32 producer_handle;
+ u32 consumer_handle;
+ bool is_connected;
+} rmnet_bridge_cb;
+
+/**
+* rmnet_bridge_init() - Initialize RmNet bridge module
+*
+* Return codes:
+* 0: success
+*/
+int rmnet_bridge_init(void)
+{
+ memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
+
+ return 0;
+}
+EXPORT_SYMBOL(rmnet_bridge_init);
+
+/**
+* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_disconnect(void)
+{
+ int ret = 0;
+ if (!rmnet_bridge_cb.is_connected) {
+ pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.is_connected = false;
+
+ ret = ipa_bridge_teardown(IPA_DL);
+ ret = ipa_bridge_teardown(IPA_UL);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_disconnect);
+
+/**
+* rmnet_bridge_connect() - Connect RmNet bridge module
+* @producer_hdl: IPA producer handle
+* @consumer_hdl: IPA consumer handle
+* @wwan_logical_channel_id: WWAN logical channel ID
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id)
+{
+ int ret = 0;
+
+ if (rmnet_bridge_cb.is_connected) {
+ pr_err("%s: trying to connect already connected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.consumer_handle = consumer_hdl;
+ rmnet_bridge_cb.producer_handle = producer_hdl;
+ rmnet_bridge_cb.is_connected = true;
+
+ ret = ipa_bridge_setup(IPA_DL);
+ if (ret) {
+ pr_err("%s: IPA DL bridge setup failure\n", __func__);
+ goto bail_dl;
+ }
+ ret = ipa_bridge_setup(IPA_UL);
+ if (ret) {
+ pr_err("%s: IPA UL bridge setup failure\n", __func__);
+ goto bail_ul;
+ }
+ return 0;
+bail_ul:
+ ipa_bridge_teardown(IPA_DL);
+bail_dl:
+ rmnet_bridge_cb.is_connected = false;
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_connect);
+
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle)
+{
+ if (producer_handle == NULL || consumer_handle == NULL)
+ return;
+
+ *producer_handle = rmnet_bridge_cb.producer_handle;
+ *consumer_handle = rmnet_bridge_cb.consumer_handle;
+}
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index cb6b23e..8dbdfa3 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -291,6 +291,7 @@
u8 active_path;
int recent_reported_soc;
int battery_less_hardware;
+ int ibatmax_max_adj_ma;
};
/* user space parameter to limit usb current */
@@ -635,10 +636,26 @@
}
#define PM8921_CHG_IBATMAX_MIN 325
-#define PM8921_CHG_IBATMAX_MAX 2000
+#define PM8921_CHG_IBATMAX_MAX 3025
#define PM8921_CHG_I_MIN_MA 225
#define PM8921_CHG_I_STEP_MA 50
#define PM8921_CHG_I_MASK 0x3F
+static int pm_chg_ibatmax_get(struct pm8921_chg_chip *chip, int *ibat_ma)
+{
+ u8 temp;
+ int rc;
+
+ rc = pm8xxx_readb(chip->dev->parent, CHG_IBAT_MAX, &temp);
+ if (rc) {
+ pr_err("rc = %d while reading ibat max\n", rc);
+ *ibat_ma = 0;
+ return rc;
+ }
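+ /* the 6-bit field spans 325 mA to 3475 mA in 50 mA steps */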
+ *ibat_ma = (int)(temp & PM8921_CHG_I_MASK) * PM8921_CHG_I_STEP_MA
+ + PM8921_CHG_I_MIN_MA;
+ return 0;
+}
+
static int pm_chg_ibatmax_set(struct pm8921_chg_chip *chip, int chg_current)
{
u8 temp;
@@ -2891,6 +2908,30 @@
return IRQ_HANDLED;
}
+struct ibatmax_max_adj_entry {
+ int ibat_max_ma;
+ int max_adj_ma;
+};
+
+static struct ibatmax_max_adj_entry ibatmax_adj_table[] = {
+ {975, 300},
+ {1475, 150},
+ {1975, 200},
+ {2475, 250},
+};
+
+static int find_ibat_max_adj_ma(int ibat_target_ma)
+{
+	int i;
+
+	/*
+	 * Return the adjustment for the first table entry whose threshold
+	 * covers the target current; targets above the last threshold
+	 * clamp to the last entry.
+	 */
+	for (i = 0; i < ARRAY_SIZE(ibatmax_adj_table) - 1; i++) {
+		if (ibat_target_ma <= ibatmax_adj_table[i].ibat_max_ma)
+			break;
+	}
+
+	return ibatmax_adj_table[i].max_adj_ma;
+}
+
static irqreturn_t fastchg_irq_handler(int irq, void *data)
{
struct pm8921_chg_chip *chip = data;
@@ -4207,6 +4248,81 @@
}
DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
+static int reg_loop;
+#define MAX_REG_LOOP_CHAR 10
+static int get_reg_loop_param(char *buf, struct kernel_param *kp)
+{
+ u8 temp;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ temp = pm_chg_get_regulation_loop(the_chip);
+ return snprintf(buf, MAX_REG_LOOP_CHAR, "%d", temp);
+}
+module_param_call(reg_loop, NULL, get_reg_loop_param,
+ &reg_loop, 0644);
+
+static int max_chg_ma;
+#define MAX_MA_CHAR 10
+static int get_max_chg_ma_param(char *buf, struct kernel_param *kp)
+{
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ return snprintf(buf, MAX_MA_CHAR, "%d", the_chip->max_bat_chg_current);
+}
+module_param_call(max_chg_ma, NULL, get_max_chg_ma_param,
+ &max_chg_ma, 0644);
+static int ibatmax_ma;
+static int set_ibat_max(const char *val, struct kernel_param *kp)
+{
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("error setting value %d\n", rc);
+ return rc;
+ }
+
+ if (abs(ibatmax_ma - the_chip->max_bat_chg_current)
+ <= the_chip->ibatmax_max_adj_ma) {
+ rc = pm_chg_ibatmax_set(the_chip, ibatmax_ma);
+ if (rc) {
+ pr_err("Failed to set ibatmax rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+static int get_ibat_max(char *buf, struct kernel_param *kp)
+{
+ int ibat_ma;
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = pm_chg_ibatmax_get(the_chip, &ibat_ma);
+ if (rc) {
+ pr_err("ibatmax_get error = %d\n", rc);
+ return rc;
+ }
+
+ return snprintf(buf, MAX_MA_CHAR, "%d", ibat_ma);
+}
+module_param_call(ibatmax_ma, set_ibat_max, get_ibat_max,
+ &ibatmax_ma, 0644);
enum {
BAT_WARM_ZONE,
BAT_COOL_ZONE,
@@ -4445,6 +4561,9 @@
if (chip->battery_less_hardware)
charging_disabled = 1;
+ chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
+ chip->max_bat_chg_current);
+
rc = pm8921_chg_hw_init(chip);
if (rc) {
pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/smb137c-charger.c b/drivers/power/smb137c-charger.c
index b865bd7..9cdf5b5 100644
--- a/drivers/power/smb137c-charger.c
+++ b/drivers/power/smb137c-charger.c
@@ -992,29 +992,47 @@
{
struct smb137c_chip *chip = container_of(psy, struct smb137c_chip, psy);
union power_supply_propval prop = {0,};
+ int scope = POWER_SUPPLY_SCOPE_DEVICE;
+ int current_limit = USB_MIN_CURRENT_UA;
+ int online = 0;
+ int rc;
mutex_lock(&chip->lock);
dev_dbg(&chip->client->dev, "%s: start\n", __func__);
- chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_ONLINE,
- &prop);
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB online property, rc=%d\n",
+ __func__, rc);
+ else
+ online = prop.intval;
- if (prop.intval) {
- /* USB online */
- chip->usb_psy->get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_SCOPE, &prop);
- if (prop.intval == POWER_SUPPLY_SCOPE_SYSTEM) {
- /* USB host mode */
- smb137c_enable_otg_mode(chip);
- smb137c_disable_charging(chip);
- } else {
- /* USB device mode */
- chip->usb_psy->get_property(chip->usb_psy,
+ rc = chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_SCOPE,
+ &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB scope property, rc=%d\n",
+ __func__, rc);
+ else
+ scope = prop.intval;
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
- smb137c_set_usb_input_current_limit(chip, prop.intval);
- smb137c_enable_charging(chip);
- smb137c_disable_otg_mode(chip);
- }
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB current_max property, rc=%d\n",
+ __func__, rc);
+ else
+ current_limit = prop.intval;
+
+ if (scope == POWER_SUPPLY_SCOPE_SYSTEM) {
+ /* USB host mode */
+ smb137c_disable_charging(chip);
+ smb137c_enable_otg_mode(chip);
+ } else if (online) {
+ /* USB online in device mode */
+ smb137c_set_usb_input_current_limit(chip, current_limit);
+ smb137c_enable_charging(chip);
+ smb137c_disable_otg_mode(chip);
} else {
/* USB offline */
smb137c_disable_charging(chip);
@@ -1318,7 +1336,6 @@
};
MODULE_DEVICE_TABLE(i2c, smb137c_id);
-/* TODO: should this be "summit,smb137c-charger"? */
static const struct of_device_id smb137c_match[] = {
{ .compatible = "summit,smb137c", },
{ },
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 57cde45..0ebb944 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1897,6 +1897,8 @@
if (rdev->desc->ops->list_voltage)
selector = rdev->desc->ops->list_voltage(rdev,
selector);
+ else if (rdev->desc->ops->get_voltage)
+ selector = rdev->desc->ops->get_voltage(rdev);
else
selector = -1;
} else if (rdev->desc->ops->set_voltage_sel) {
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 0549593..a330f1b 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -550,11 +550,12 @@
}
static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
- int min_uV, int max_uV, int *range_sel, int *voltage_sel)
+ int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+ unsigned *selector)
{
struct qpnp_voltage_range *range;
int uV = min_uV;
- int lim_min_uV, lim_max_uV, i;
+ int lim_min_uV, lim_max_uV, i, range_id;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -575,7 +576,8 @@
for (i = vreg->set_points->count - 1; i > 0; i--)
if (uV > vreg->set_points->range[i - 1].max_uV)
break;
- range = &vreg->set_points->range[i];
+ range_id = i;
+ range = &vreg->set_points->range[range_id];
*range_sel = range->range_sel;
/*
@@ -594,6 +596,11 @@
return -EINVAL;
}
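+ /*
+ * The framework selector is linear across all ranges: count the set
+ * points of every lower range, then add the step index within the
+ * selected range.
+ */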
+ *selector = 0;
+ for (i = 0; i < range_id; i++)
+ *selector += vreg->set_points->range[i].n_voltages;
+ *selector += (uV - range->set_point_min_uV) / range->step_uV;
+
return 0;
}
@@ -605,7 +612,7 @@
u8 buf[2];
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
@@ -669,7 +676,7 @@
int rc, range_sel, voltage_sel;
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index becd823..2161fac 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,3 +4,7 @@
obj-$(CONFIG_SPMI) += spmi.o spmi-resources.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
obj-$(CONFIG_MSM_QPNP_INT) += qpnp-int.o
+
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_SPMI) += spmi-dbgfs.o
+endif
diff --git a/drivers/spmi/spmi-dbgfs.c b/drivers/spmi/spmi-dbgfs.c
new file mode 100644
index 0000000..a23f945
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.c
@@ -0,0 +1,725 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * SPMI Debug-fs support.
+ *
+ * Hierarchy schema:
+ * /sys/kernel/debug/spmi
+ * /help -- static help text
+ * /spmi-0
+ * /spmi-0/address -- Starting register address for reads or writes
+ * /spmi-0/count -- number of registers to read (only on read)
+ * /spmi-0/data -- Triggers the SPMI formatted read.
+ * /spmi-0/data_raw -- Triggers the SPMI raw read or write
+ * /spmi-#
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/ctype.h>
+
+#define ADDR_LEN 6 /* 5 address characters + 1 space */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 16 /* 16 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "spmi";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+
+/* Log buffer */
+struct spmi_log_buffer {
+ u32 rpos; /* Current 'read' position in buffer */
+ u32 wpos; /* Current 'write' position in buffer */
+ u32 len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* SPMI controller specific data */
+struct spmi_ctrl_data {
+ u32 cnt;
+ u32 addr;
+ struct list_head node;
+ struct spmi_controller *ctrl;
+};
+
+/* SPMI transaction parameters */
+struct spmi_trans {
+ u32 cnt; /* Number of bytes to read */
+ u32 addr; /* 20-bit address: SID + PID + Register offset */
+ u32 offset; /* Offset of last read data */
+ bool raw_data; /* Set to true for raw data dump */
+ struct spmi_controller *ctrl;
+ struct spmi_log_buffer *log; /* log buffer */
+};
+
+struct spmi_dbgfs {
+ struct dentry *root;
+ struct mutex lock;
+ struct list_head ctrl; /* List of spmi_ctrl_data nodes */
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct spmi_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .ctrl = LIST_HEAD_INIT(dbgfs_data.ctrl),
+ .help_msg = {
+ .data =
+"SPMI Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/spmi\n"
+" /help -- Static help text\n"
+" /spmi-0 -- Directory for SPMI bus 0\n"
+" /spmi-0/address -- Starting register address for reads or writes\n"
+" /spmi-0/count -- Number of registers to read (only used for reads)\n"
+" /spmi-0/data -- Initiates the SPMI read (formatted output)\n"
+" /spmi-0/data_raw -- Initiates the SPMI raw read or write\n"
+" /spmi-n -- Directory for SPMI bus n\n"
+"\n"
+"To perform SPMI read or write transactions, you need to first write the\n"
+"address of the slave device register to the 'address' file. For read\n"
+"transactions, the number of bytes to be read needs to be written to the\n"
+"'count' file.\n"
+"\n"
+"The 'address' file specifies the 20-bit address of a slave device register.\n"
+"The upper 4 bits 'address[19..16]' specify the slave identifier (SID) for\n"
+"the slave device. The lower 16 bits specify the slave register address.\n"
+"\n"
+"Reading from the 'data' file will initiate a SPMI read transaction starting\n"
+"from slave register 'address' for 'count' number of bytes.\n"
+"\n"
+"Writing to the 'data' file will initiate a SPMI write transaction starting\n"
+"from slave register 'address'. The number of registers written to will\n"
+"match the number of bytes written to the 'data' file.\n"
+"\n"
+"Example: Read 4 bytes starting at register address 0x1234 for SID 2\n"
+"\n"
+"echo 0x21234 > address\n"
+"echo 4 > count\n"
+"cat data\n"
+"\n"
+"Example: Write 3 bytes starting at register address 0x1008 for SID 1\n"
+"\n"
+"echo 0x11008 > address\n"
+"echo 0x01 0x02 0x03 > data\n"
+"\n"
+"Note that the count file is not used for writes. Since 3 bytes are\n"
+"written to the 'data' file, then 3 bytes will be written across the\n"
+"SPMI bus.\n\n",
+ },
+};
+
+static int spmi_dfs_open(struct spmi_ctrl_data *ctrl_data, struct file *file)
+{
+ struct spmi_log_buffer *log;
+ struct spmi_trans *trans;
+
+ size_t logbufsize = SZ_4K;
+
+ if (!ctrl_data) {
+ pr_err("No SPMI controller data\n");
+ return -EINVAL;
+ }
+
+ /* Per file "transaction" data */
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+
+ if (!trans) {
+ pr_err("Unable to allocate memory for transaction data\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate log buffer */
+ log = kzalloc(logbufsize, GFP_KERNEL);
+
+ if (!log) {
+ kfree(trans);
+ pr_err("Unable to allocate memory for log buffer\n");
+ return -ENOMEM;
+ }
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+
+ trans->log = log;
+ trans->cnt = ctrl_data->cnt;
+ trans->addr = ctrl_data->addr;
+ trans->ctrl = ctrl_data->ctrl;
+ trans->offset = trans->addr;
+
+ file->private_data = trans;
+ return 0;
+}
+
+static int spmi_dfs_data_open(struct inode *inode, struct file *file)
+{
+ struct spmi_ctrl_data *ctrl_data = inode->i_private;
+ return spmi_dfs_open(ctrl_data, file);
+}
+
+static int spmi_dfs_raw_data_open(struct inode *inode, struct file *file)
+{
+ int rc;
+ struct spmi_trans *trans;
+ struct spmi_ctrl_data *ctrl_data = inode->i_private;
+
+ rc = spmi_dfs_open(ctrl_data, file);
+ if (rc)
+ return rc;
+
+ trans = file->private_data;
+ trans->raw_data = true;
+ return 0;
+}
+
+static int spmi_dfs_close(struct inode *inode, struct file *file)
+{
+ struct spmi_trans *trans = file->private_data;
+
+ if (trans && trans->log) {
+ file->private_data = NULL;
+ kfree(trans->log);
+ kfree(trans);
+ }
+
+ return 0;
+}
+
+/**
+ * spmi_read_data: reads data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: buffer to store the data read.
+ * @offset: SPMI address offset to start reading from.
+ * @cnt: The number of bytes to read.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_read_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+ int ret = 0;
+ int len;
+ uint8_t sid;
+ uint16_t addr;
+
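+ /*
+ * The 20-bit address splits into a 4-bit slave id (SID) and a 16-bit
+ * register address; each transfer is capped at
+ * MAX_REG_PER_TRANSACTION bytes.
+ */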
+ while (cnt > 0) {
+ sid = (offset >> 16) & 0xF;
+ addr = offset & 0xFFFF;
+ len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+ ret = spmi_ext_register_readl(ctrl, sid, addr, buf, len);
+ if (ret < 0) {
+ pr_err("SPMI read failed, err = %d\n", ret);
+ goto done;
+ }
+
+ cnt -= len;
+ buf += len;
+ offset += len;
+ }
+
+done:
+ return ret;
+}
+
+/**
+ * spmi_write_data: writes data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: data to be written.
+ * @offset: SPMI address offset to start writing to.
+ * @cnt: The number of bytes to write.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_write_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+ int ret = 0;
+ int len;
+ uint8_t sid;
+ uint16_t addr;
+
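+ /* same SID/address split and per-transfer cap as spmi_read_data() */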
+ while (cnt > 0) {
+ sid = (offset >> 16) & 0xF;
+ addr = offset & 0xFFFF;
+ len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+ ret = spmi_ext_register_writel(ctrl, sid, addr, buf, len);
+ if (ret < 0) {
+ pr_err("SPMI write failed, err = %d\n", ret);
+ goto done;
+ }
+
+ cnt -= len;
+ buf += len;
+ offset += len;
+ }
+
+done:
+ return ret;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct spmi_log_buffer *log, const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bit SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus. When the cnt reaches 0, all requested
+ * bytes have been read.
+ */
+static int
+write_next_line_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+ int i, j;
+ u8 data[ITEMS_PER_LINE];
+ struct spmi_log_buffer *log = trans->log;
+
+ int cnt = 0;
+ int padding = offset % ITEMS_PER_LINE;
+ int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+ int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+ goto done;
+
+ /* Read the desired number of "items" */
+ if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+ goto done;
+
+ *pcnt -= items_to_read;
+
+ /* Each line starts with the aligned offset (20-bit address) */
+ cnt = print_to_log(log, "%5.5X ", offset & 0xffff0);
+ if (cnt == 0)
+ goto done;
+
+ /* If the offset is unaligned, add padding to right justify items */
+ for (i = 0; i < padding; ++i) {
+ cnt = print_to_log(log, "-- ");
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* Log the data items */
+ for (j = 0; i < items_to_log; ++i, ++j) {
+ cnt = print_to_log(log, "%2.2X ", data[j]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * write_raw_data_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bit SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus. When the cnt reaches 0, all requested
+ * bytes have been read.
+ */
+static int
+write_raw_data_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+ u8 data[16];
+ struct spmi_log_buffer *log = trans->log;
+
+ int i;
+ int cnt = 0;
+ int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < 80)
+ goto done;
+
+ /* Read the desired number of "items" */
+ if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+ goto done;
+
+ *pcnt -= items_to_read;
+
+ /* Log the data items */
+ for (i = 0; i < items_to_read; ++i) {
+ cnt = print_to_log(log, "0x%2.2X ", data[i]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * get_log_data - reads data across the SPMI bus and saves to the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct spmi_trans *trans)
+{
+ int cnt;
+ int last_cnt;
+ int items_read;
+ int total_items_read = 0;
+ u32 offset = trans->offset;
+ size_t item_cnt = trans->cnt;
+ struct spmi_log_buffer *log = trans->log;
+ int (*write_to_log)(struct spmi_trans *, int, size_t *);
+
+ if (item_cnt == 0)
+ return 0;
+
+ if (trans->raw_data)
+ write_to_log = write_raw_data_to_log;
+ else
+ write_to_log = write_next_line_to_log;
+
+ /* Reset the log buffer 'pointers' */
+ log->wpos = log->rpos = 0;
+
+ /* Keep reading data until the log is full */
+ do {
+ last_cnt = item_cnt;
+ cnt = write_to_log(trans, offset, &item_cnt);
+ items_read = last_cnt - item_cnt;
+ offset += items_read;
+ total_items_read += items_read;
+ } while (cnt && item_cnt > 0);
+
+ /* Adjust the transaction offset and count */
+ trans->cnt = item_cnt;
+ trans->offset += total_items_read;
+
+ return total_items_read;
+}
+
+/**
+ * spmi_dfs_reg_write: write user's byte array (coded as string) over SPMI.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t spmi_dfs_reg_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bytes_read;
+ int data;
+ int pos = 0;
+ int cnt = 0;
+ u8 *values;
+ ssize_t ret = 0;
+
+ struct spmi_trans *trans = file->private_data;
+ u32 offset = trans->offset;
+
+ /* Make a copy of the user data */
+ char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+
+ /* Overwrite the text buffer with the parsed raw data */
+ values = kbuf;
+
+ /* Parse the data in the buffer. It should be a string of numbers */
+ while (sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ pos += bytes_read;
+ values[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ /* Perform the SPMI write(s) */
+ ret = spmi_write_data(trans->ctrl, values, offset, cnt);
+
+ if (ret) {
+ pr_err("SPMI write failed, err = %zd\n", ret);
+ } else {
+ ret = count;
+ trans->offset += cnt;
+ }
+
+free_buf:
+ kfree(kbuf);
+ return ret;
+}
+
+/**
+ * spmi_dfs_reg_read: reads value(s) over SPMI and fills the user's buffer
+ * with a byte array (coded as string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t spmi_dfs_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct spmi_trans *trans = file->private_data;
+ struct spmi_log_buffer *log = trans->log;
+ size_t ret;
+ size_t len;
+
+ /* Is the log buffer empty? */
+ if (log->rpos >= log->wpos) {
+ if (get_log_data(trans) <= 0)
+ return 0;
+ }
+
+ len = min(count, (size_t)(log->wpos - log->rpos));
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret == len) {
+ pr_err("error copy SPMI register values to user\n");
+ return -EFAULT;
+ }
+
+ /* 'ret' is the number of bytes not copied */
+ len -= ret;
+
+ *ppos += len;
+ log->rpos += len;
+ return len;
+}
+
+static const struct file_operations spmi_dfs_reg_fops = {
+ .open = spmi_dfs_data_open,
+ .release = spmi_dfs_close,
+ .read = spmi_dfs_reg_read,
+ .write = spmi_dfs_reg_write,
+};
+
+static const struct file_operations spmi_dfs_raw_data_fops = {
+ .open = spmi_dfs_raw_data_open,
+ .release = spmi_dfs_close,
+ .read = spmi_dfs_reg_read,
+ .write = spmi_dfs_reg_write,
+};
+
+/**
+ * spmi_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *spmi_dfs_create_fs(void)
+{
+ struct dentry *root, *file;
+
+ pr_debug("Creating SPMI debugfs file-system\n");
+ root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+ if (IS_ERR(root)) {
+ pr_err("Error creating top level directory err:%ld\n",
+ PTR_ERR(root));
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel\n");
+ return NULL;
+ }
+
+ dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+ file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ if (!file) {
+ pr_err("error creating help entry\n");
+ goto err_remove_fs;
+ }
+ return root;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return NULL;
+}
+
+/**
+ * spmi_dfs_get_root: return a pointer to the SPMI debugfs root directory.
+ * Returns a pointer to the existing directory, or creates one if no root
+ * directory exists yet. The directory is populated with the files that
+ * configure an SPMI transaction, namely 'address' and 'count'.
+ * @returns valid pointer on success or NULL
+ */
+struct dentry *spmi_dfs_get_root(void)
+{
+ if (dbgfs_data.root)
+ return dbgfs_data.root;
+
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return NULL;
+ /* critical section */
+ if (!dbgfs_data.root) { /* double checking idiom */
+ dbgfs_data.root = spmi_dfs_create_fs();
+ }
+ mutex_unlock(&dbgfs_data.lock);
+ return dbgfs_data.root;
+}
+
+/*
+ * spmi_dfs_add_controller: adds new spmi controller entry
+ * @return zero on success
+ */
+int spmi_dfs_add_controller(struct spmi_controller *ctrl)
+{
+ struct dentry *dir;
+ struct dentry *root;
+ struct dentry *file;
+ struct spmi_ctrl_data *ctrl_data;
+
+ pr_debug("Adding controller %s\n", ctrl->dev.kobj.name);
+ root = spmi_dfs_get_root();
+ if (!root)
+ return -ENOENT;
+
+ /* Allocate transaction data for the controller */
+ ctrl_data = kzalloc(sizeof(*ctrl_data), GFP_KERNEL);
+ if (!ctrl_data)
+ return -ENOMEM;
+
+ dir = debugfs_create_dir(ctrl->dev.kobj.name, root);
+ if (!dir) {
+ pr_err("Error creating entry for spmi controller %s\n",
+ ctrl->dev.kobj.name);
+ goto err_create_dir_failed;
+ }
+
+ ctrl_data->cnt = 1;
+ ctrl_data->ctrl = ctrl;
+
+ file = debugfs_create_u32("count", DFS_MODE, dir, &ctrl_data->cnt);
+ if (!file) {
+ pr_err("error creating 'count' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_x32("address", DFS_MODE, dir, &ctrl_data->addr);
+ if (!file) {
+ pr_err("error creating 'address' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data", DFS_MODE, dir, ctrl_data,
+ &spmi_dfs_reg_fops);
+ if (!file) {
+ pr_err("error creating 'data' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data_raw", DFS_MODE, dir, ctrl_data,
+ &spmi_dfs_raw_data_fops);
+ if (!file) {
+ pr_err("error creating 'data_raw' entry\n");
+ goto err_remove_fs;
+ }
+
+ list_add(&ctrl_data->node, &dbgfs_data.ctrl);
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(dir);
+err_create_dir_failed:
+ kfree(ctrl_data);
+ return -ENOMEM;
+}
+
+static void __exit spmi_dfs_delete_all_ctrl(struct list_head *head)
+{
+ struct list_head *pos, *tmp;
+
+ list_for_each_safe(pos, tmp, head) {
+ struct spmi_ctrl_data *ctrl_data;
+
+ ctrl_data = list_entry(pos, struct spmi_ctrl_data, node);
+ list_del(pos);
+ kfree(ctrl_data);
+ }
+}
+
+static void __exit spmi_dfs_destroy(void)
+{
+ pr_debug("de-initializing spmi debugfs\n");
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return;
+ if (dbgfs_data.root) {
+ debugfs_remove_recursive(dbgfs_data.root);
+ dbgfs_data.root = NULL;
+ spmi_dfs_delete_all_ctrl(&dbgfs_data.ctrl);
+ }
+ mutex_unlock(&dbgfs_data.lock);
+}
+
+module_exit(spmi_dfs_destroy);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_debug_fs");
diff --git a/drivers/spmi/spmi-dbgfs.h b/drivers/spmi/spmi-dbgfs.h
new file mode 100644
index 0000000..0baa4db
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SPMI_DBGFS_H
+#define _SPMI_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+int spmi_dfs_add_controller(struct spmi_controller *ctrl);
+#else
+static inline int spmi_dfs_add_controller(struct spmi_controller *ctrl) { return 0; }
+#endif
+
+#endif /* _SPMI_DBGFS_H */
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 914df95..ad58240 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include "spmi-dbgfs.h"
+
struct spmii_boardinfo {
struct list_head list;
struct spmi_boardinfo board_info;
@@ -755,6 +757,7 @@
list_add_tail(&ctrl->list, &spmi_ctrl_list);
mutex_unlock(&board_lock);
+ spmi_dfs_add_controller(ctrl);
return 0;
exit:
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 2f3f83d..cc9ffaa 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -1369,9 +1369,12 @@
else
line = pdev->id;
- /* Use line number from device tree if present */
- if (pdev->dev.of_node)
- of_property_read_u32(pdev->dev.of_node, "cell-index", &line);
+ /* Use line number from device tree alias if present */
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ line = ret;
+ }
if (unlikely(line < 0 || line >= UART_NR))
return -ENXIO;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f903dd..3679191 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1527,6 +1527,13 @@
} else {
ret = dwc3_gadget_run_stop(dwc, 0);
}
+ } else if (dwc->gadget_driver && !dwc->softconnect &&
+ !dwc->vbus_active) {
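+ /*
+ * Neither soft-connect nor VBUS is active: report the
+ * disconnect to the gadget driver outside the spinlock.
+ */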
+ if (dwc->gadget_driver->disconnect) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc->gadget_driver->disconnect(&dwc->gadget);
+ return 0;
+ }
}
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 6b9295b..85240ef 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -941,23 +941,23 @@
pr_debug("dev:%p port#%d\n", dev, dev->port_num);
- spin_lock(&dev->lock);
- if (!dev->is_open) {
- pr_err("mbim file handler %p is not open", dev);
- spin_unlock(&dev->lock);
- return;
- }
-
cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
if (!cpkt) {
pr_err("Unable to allocate ctrl pkt\n");
- spin_unlock(&dev->lock);
return;
}
pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
memcpy(cpkt->buf, req->buf, len);
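+ /*
+ * Re-check is_open under the lock; free the packet if the file
+ * handler was closed in the meantime.
+ */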
+ spin_lock(&dev->lock);
+ if (!dev->is_open) {
+ pr_err("mbim file handler %p is not open", dev);
+ spin_unlock(&dev->lock);
+ mbim_free_ctrl_pkt(cpkt);
+ return;
+ }
+
list_add_tail(&cpkt->list, &dev->cpkt_req_q);
spin_unlock(&dev->lock);
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 23a9499..c6fe765 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -1582,6 +1582,26 @@
return 0;
}
+static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
+{
+ unsigned long flags;
+ int id;
+
+ if (!motg->pdata->pmic_id_irq)
+ return true; /* no ID line to sample; treat as floating */
+
+ local_irq_save(flags);
+ id = irq_read_line(motg->pdata->pmic_id_irq);
+ local_irq_restore(flags);
+
+ /*
+ * If the ID line state cannot be read for some reason, treat it
+ * as floating. This prevents MHL discovery and host mode from
+ * kicking in unnecessarily.
+ */
+ return !!id;
+}
+
static int msm_otg_mhl_register_callback(struct msm_otg *motg,
void (*callback)(int on))
{
@@ -1664,14 +1684,11 @@
static bool msm_chg_mhl_detect(struct msm_otg *motg)
{
bool ret, id;
- unsigned long flags;
if (!motg->mhl_enabled)
return false;
- local_irq_save(flags);
- id = irq_read_line(motg->pdata->pmic_id_irq);
- local_irq_restore(flags);
+ id = msm_otg_read_pmic_id_state(motg);
if (id)
return false;
@@ -2299,13 +2316,10 @@
clear_bit(B_SESS_VLD, &motg->inputs);
} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
if (pdata->pmic_id_irq) {
- unsigned long flags;
- local_irq_save(flags);
- if (irq_read_line(pdata->pmic_id_irq))
+ if (msm_otg_read_pmic_id_state(motg))
set_bit(ID, &motg->inputs);
else
clear_bit(ID, &motg->inputs);
- local_irq_restore(flags);
}
/*
* VBUS initial state is reported after PMIC
@@ -2453,6 +2467,18 @@
motg->chg_type = USB_INVALID_CHARGER;
msm_otg_notify_charger(motg, 0);
msm_otg_reset(otg->phy);
+ /*
+ * There is a small window where ID interrupt
+ * is not monitored during ID detection circuit
+ * switch from ACA to PMIC. Check ID state
+ * before entering into low power mode.
+ */
+ if (!msm_otg_read_pmic_id_state(motg)) {
+ pr_debug("process missed ID intr\n");
+ clear_bit(ID, &motg->inputs);
+ work = 1;
+ break;
+ }
pm_runtime_put_noidle(otg->phy->dev);
/*
* Only if autosuspend was enabled in probe, it will be
@@ -3124,10 +3150,8 @@
struct msm_otg *motg = container_of(w, struct msm_otg,
pmic_id_status_work.work);
int work = 0;
- unsigned long flags;
- local_irq_save(flags);
- if (irq_read_line(motg->pdata->pmic_id_irq)) {
+ if (msm_otg_read_pmic_id_state(motg)) {
if (!test_and_set_bit(ID, &motg->inputs)) {
pr_debug("PMIC: ID set\n");
work = 1;
@@ -3146,7 +3170,6 @@
else
queue_work(system_nrt_wq, &motg->sm_work);
}
- local_irq_restore(flags);
}
diff --git a/drivers/video/msm/mdss/Kconfig b/drivers/video/msm/mdss/Kconfig
index 424455f..56eb90c 100644
--- a/drivers/video/msm/mdss/Kconfig
+++ b/drivers/video/msm/mdss/Kconfig
@@ -11,3 +11,12 @@
---help---
The MDSS HDMI Panel provides support for transmitting TMDS signals of
MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
+
+config FB_MSM_MDSS_HDMI_MHL_8334
+ depends on FB_MSM_MDSS_HDMI_PANEL
+ bool 'MHL SII8334 support'
+ default n
+ ---help---
+ Support the HDMI to MHL conversion.
+ MHL (Mobile High-Definition Link) technology
+ uses the USB connector to output HDMI content
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index b4bd31e..88a7c45 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -18,5 +18,6 @@
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_8334) += mhl_sii8334.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8f4f4d5..980ed46 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -327,6 +327,26 @@
return ret;
}
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_dsi_on(pdata);
+ break;
+ case MDSS_EVENT_BLANK:
+ rc = mdss_dsi_ctrl_unprepare(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_dsi_off(pdata);
+ break;
+ }
+ return rc;
+}
+
static int mdss_dsi_resource_initialized;
static int __devinit mdss_dsi_probe(struct platform_device *pdev)
@@ -476,9 +496,7 @@
if (!ctrl_pdata)
return -ENOMEM;
- (ctrl_pdata->panel_data).on = mdss_dsi_on;
- (ctrl_pdata->panel_data).off = mdss_dsi_off;
- (ctrl_pdata->panel_data).intf_unprepare = mdss_dsi_ctrl_unprepare;
+ ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
memcpy(&((ctrl_pdata->panel_data).panel_info),
&(panel_data->panel_info),
sizeof(struct mdss_panel_info));
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index 1d7a6fe..1cf3101 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -353,6 +353,23 @@
return ret;
}
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_edp_on(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_edp_off(pdata);
+ break;
+ }
+ return rc;
+}
+
/*
* Converts from EDID struct to mdss_panel_info
*/
@@ -413,8 +430,7 @@
edp_drv->panel_data.panel_info.bl_min = 1;
edp_drv->panel_data.panel_info.bl_max = 255;
- edp_drv->panel_data.on = mdss_edp_on;
- edp_drv->panel_data.off = mdss_edp_off;
+ edp_drv->panel_data.event_handler = mdss_edp_event_handler;
edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
ret = mdss_register_panel(&edp_drv->panel_data);
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index b711fd9..d0361e5 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -325,6 +325,24 @@
return 0;
}
+static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+{
+ struct mdss_panel_data *pdata;
+
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending event=%d for fb%d\n", e, mfd->index);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, e, NULL);
+
+ return 0;
+}
+
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
@@ -334,6 +352,12 @@
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+ if (ret) {
+ pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_on = mfd->panel_power_on;
@@ -359,6 +383,12 @@
pr_debug("mdss_fb resume index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+ if (ret) {
+ pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
/* resume state var recover */
mfd->op_enable = mfd->suspend.op_enable;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 78f2b9a..b760388 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -93,6 +93,7 @@
u32 bl_scale;
u32 bl_min_lvl;
struct mutex lock;
+ struct mutex ov_lock;
struct platform_device *pdev;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index d932bc9..5d953c5 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -2020,6 +2020,21 @@
return rc;
} /* hdmi_tx_dev_init */
+static int hdmi_tx_event_handler(struct mdss_panel_data *panel_data,
+ int event, void *arg)
+{
+ int rc = 0;
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = hdmi_tx_power_on(panel_data);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = hdmi_tx_power_off(panel_data);
+ break;
+ }
+ return rc;
+}
+
static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc = 0;
@@ -2029,8 +2044,7 @@
return -EINVAL;
}
- hdmi_ctrl->panel_data.on = hdmi_tx_power_on;
- hdmi_ctrl->panel_data.off = hdmi_tx_power_off;
+ hdmi_ctrl->panel_data.event_handler = hdmi_tx_event_handler;
hdmi_ctrl->video_resolution = DEFAULT_VIDEO_RESOLUTION;
rc = hdmi_tx_init_panel_info(hdmi_ctrl->video_resolution,
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index 0a14056..d7c19b4 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include "mdss_io_util.h"
+#define MAX_I2C_CMDS 16
void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
{
u32 in_val;
@@ -382,3 +383,59 @@
return rc;
} /* msm_dss_enable_clk */
+
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
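+ /*
+ * Callers pass the 8-bit slave address (R/W bit included); the i2c
+ * core expects the 7-bit form.
+ */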
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value)
+{
+ struct i2c_msg msgs[1];
+ uint8_t data[2];
+ int status = -EACCES;
+
+ pr_debug("%s: writing to slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ data[0] = reg_offset;
+ data[1] = *value;
+
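+ /* 7-bit address, as in mdss_i2c_byte_read() */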
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = data;
+
+ status = i2c_transfer(client->adapter, msgs, 1);
+ if (status < 1) {
+ pr_err("I2C WRITE FAILED=[%d]\n", status);
+ return -EACCES;
+ }
+ pr_debug("%s: I2C write status=%x\n", __func__, status);
+ return status;
+}
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 51e9e54..9d78d70 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -16,6 +16,8 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
#ifdef DEBUG
#define DEV_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -97,4 +99,9 @@
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value);
+
#endif /* __MDSS_IO_UTIL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 72871aa..2e92591 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -284,13 +284,13 @@
void mdss_mdp_clk_ctrl(int enable, int isr);
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd);
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg);
struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index f660375..00f5874 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,11 @@
bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
+
+ if ((bus_ib_quota == 0) && (clk_rate > 0)) {
+ /* allocate min bw for panel cmds if mdp is active */
+ bus_ib_quota = SZ_16M;
+ }
mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
}
if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
@@ -531,9 +536,28 @@
return 0;
}
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
struct mdss_panel_data *pdata;
+ if (!ctl || !ctl->mfd)
+ return -ENODEV;
+
+ pdata = dev_get_platdata(&ctl->mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, event, arg);
+
+ return 0;
+}
+
+int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
u32 outsize, temp, off;
@@ -545,12 +569,6 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
if (mdss_mdp_ctl_init(mfd)) {
pr_err("unable to initialize ctl\n");
return -ENODEV;
@@ -568,6 +586,12 @@
ctl->power_on = true;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
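+ /*
+ * Let the panel driver reset and power up before the interface
+ * timing generator is started.
+ */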
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
+ if (ret) {
+ pr_err("panel power on failed ctl=%d\n", ctl->num);
+ goto start_fail;
+ }
+
if (ctl->start_fnc)
ret = ctl->start_fnc(ctl);
else
@@ -579,17 +603,6 @@
goto start_fail;
}
- /* request bus bandwidth for panel commands */
- ctl->clk_rate = MDP_CLK_DEFAULT_RATE;
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
- ret = pdata->on(pdata);
- if (ret) {
- pr_err("panel power on failed ctl=%d\n", ctl->num);
- goto panel_fail;
- }
-
pr_debug("ctl_num=%d\n", ctl->num);
mixer = ctl->mixer_left;
@@ -617,23 +630,18 @@
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
}
-panel_fail:
- if (ret && ctl->stop_fnc)
- ctl->stop_fnc(ctl);
+
start_fail:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&ctl->lock);
- if (ret) {
+ if (ret)
mdss_mdp_ctl_destroy(mfd);
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
- }
return ret;
}
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd)
{
- struct mdss_panel_data *pdata;
struct mdss_mdp_ctl *ctl;
int ret = 0;
@@ -648,12 +656,6 @@
return -ENODEV;
}
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
ctl = mfd->ctl;
if (!ctl->power_on) {
@@ -663,43 +665,33 @@
pr_debug("ctl_num=%d\n", mfd->ctl->num);
- mdss_mdp_overlay_release_all(mfd);
-
- /* request bus bandwidth for panel commands */
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_lock(&ctl->lock);
- ctl->power_on = false;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (pdata->intf_unprepare)
- ret = pdata->intf_unprepare(pdata);
-
- if (ret)
- pr_err("%s: intf_unprepare failed\n", __func__);
-
if (ctl->stop_fnc)
ret = ctl->stop_fnc(ctl);
else
pr_warn("no stop func for ctl=%d\n", ctl->num);
- if (ret)
+ if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
-
- ret = pdata->off(pdata);
+ } else {
+ ctl->power_on = false;
+ ctl->play_cnt = 0;
+ ctl->clk_rate = 0;
+ mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
+ }
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- ctl->play_cnt = 0;
-
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_unlock(&ctl->lock);
- if (!mfd->ref_cnt)
+ if (!ret && !mfd->ref_cnt) {
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
+ WARN(ret, "unable to close intf %d\n", ctl->intf_num);
mdss_mdp_ctl_destroy(mfd);
+ }
return ret;
}
@@ -926,13 +918,16 @@
return -ENODEV;
}
- if (!ctl->power_on)
- return 0;
-
pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
- if (mutex_lock_interruptible(&ctl->lock))
- return -EINTR;
+ ret = mutex_lock_interruptible(&ctl->lock);
+ if (ret)
+ return ret;
+
+ if (!ctl->power_on) {
+ mutex_unlock(&ctl->lock);
+ return 0;
+ }
mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
@@ -992,7 +987,7 @@
mutex_lock(&mdss_mdp_ctl_lock);
for (i = 0; i < MDSS_MDP_MAX_CTL; i++) {
ctl = &mdss_mdp_ctl_list[i];
- if ((ctl->power_on) &&
+ if ((ctl->power_on) && (ctl->mfd) &&
(ctl->mfd->index == fb_num)) {
if (ctl->mixer_left) {
mixer_id[mixer_cnt] = ctl->mixer_left->num;
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 4d3fbf0..9508846 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -201,7 +201,7 @@
static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_video_ctx *ctx;
- int off;
+ int rc, off;
pr_debug("stop ctl=%d\n", ctl->num);
@@ -211,16 +211,27 @@
return -ENODEV;
}
- if (ctx->vsync_handler)
- mdss_mdp_video_set_vsync_handler(ctl, NULL);
-
if (ctx->timegen_en) {
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+ if (rc == -EBUSY) {
+ pr_debug("intf #%d busy don't turn off\n",
+ ctl->intf_num);
+ return rc;
+ }
+ WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ctx->timegen_en = false;
+
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_OFF, NULL);
+ WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
}
+ if (ctx->vsync_handler)
+ mdss_mdp_video_set_vsync_handler(ctl, NULL);
+
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
NULL, NULL);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
@@ -288,6 +299,7 @@
static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_video_ctx *ctx;
+ int rc;
pr_debug("kickoff ctl=%d\n", ctl->num);
@@ -306,15 +318,23 @@
if (!ctx->timegen_en) {
int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+ WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
- ctx->timegen_en = true;
wmb();
}
wait_for_completion(&ctx->vsync_comp);
+
+ if (!ctx->timegen_en) {
+ ctx->timegen_en = true;
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_ON, NULL);
+ WARN(rc, "intf %d timegen on error (%d)\n", ctl->intf_num, rc);
+ }
if (!ctx->vsync_handler)
mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
mutex_unlock(&ctx->vsync_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 569e381..ca8b2f9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,8 @@
#define CHECK_BOUNDS(offset, size, max_size) \
(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+static atomic_t ov_active_panels = ATOMIC_INIT(0);
+
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
@@ -356,8 +358,14 @@
{
int ret;
- if (!mfd->panel_power_on)
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
return -EPERM;
+ }
if (req->flags & MDSS_MDP_ROT_ONLY) {
ret = mdss_mdp_overlay_rotator_setup(mfd, req);
@@ -372,6 +380,8 @@
req->z_order -= MDSS_MDP_STAGE_0;
}
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -411,35 +421,18 @@
return 0;
}
-static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe, *tmp;
- struct msm_fb_data_type *mfd = ctl->mfd;
- int i, ret;
+ LIST_HEAD(destroy_pipes);
+ int i;
- if (mfd->kickoff_fnc)
- ret = mfd->kickoff_fnc(ctl);
- else
- ret = mdss_mdp_display_commit(ctl, NULL);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- complete(&mfd->update.comp);
- mutex_lock(&mfd->no_update.lock);
- if (mfd->no_update.timer.function)
- del_timer(&(mfd->no_update.timer));
-
- mfd->no_update.timer.expires = jiffies + (2 * HZ);
- add_timer(&mfd->no_update.timer);
- mutex_unlock(&mfd->no_update.lock);
-
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
- list_del(&pipe->cleanup_list);
+ list_move(&pipe->cleanup_list, &destroy_pipes);
for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
-
- mdss_mdp_pipe_destroy(pipe);
}
if (!list_empty(&mfd->pipes_used)) {
@@ -458,36 +451,44 @@
}
}
mutex_unlock(&mfd->lock);
+ list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+ mdss_mdp_pipe_destroy(pipe);
+ mutex_unlock(&mfd->ov_lock);
+
+ return 0;
+}
+
+static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+{
+ struct msm_fb_data_type *mfd = ctl->mfd;
+ int ret;
+
+ if (mfd->kickoff_fnc)
+ ret = mfd->kickoff_fnc(ctl);
+ else
+ ret = mdss_mdp_display_commit(ctl, NULL);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ complete(&mfd->update.comp);
+ mutex_lock(&mfd->no_update.lock);
+ if (mfd->no_update.timer.function)
+ del_timer(&(mfd->no_update.timer));
+
+ mfd->no_update.timer.expires = jiffies + (2 * HZ);
+ add_timer(&mfd->no_update.timer);
+ mutex_unlock(&mfd->no_update.lock);
+
+ ret = mdss_mdp_overlay_cleanup(mfd);
return ret;
}
-static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
{
struct mdss_mdp_pipe *pipe;
- int i, ret = 0;
u32 pipe_ndx, unset_ndx = 0;
-
- if (!mfd || !mfd->ctl)
- return -ENODEV;
-
- pr_debug("unset ndx=%x\n", ndx);
-
- if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(ndx);
- if (rot) {
- mdss_mdp_rotator_finish(rot);
- } else {
- pr_warn("unknown session id=%x\n", ndx);
- ret = -ENODEV;
- }
-
- return ret;
- }
-
- if (!mfd->ctl->power_on)
- return 0;
+ int i;
for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
pipe_ndx = BIT(i);
@@ -505,37 +506,59 @@
mdss_mdp_mixer_pipe_unstage(pipe);
}
}
+ return 0;
+}
+
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+ int ret = 0;
+
+ if (!mfd || !mfd->ctl)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ pr_debug("unset ndx=%x\n", ndx);
+
+ if (ndx & MDSS_MDP_ROT_SESSION_MASK)
+ ret = mdss_mdp_rotator_release(ndx);
+ else
+ ret = mdss_mdp_overlay_release(mfd, ndx);
+
+ mutex_unlock(&mfd->ov_lock);
return ret;
}
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+static int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
u32 unset_ndx = 0;
int cnt = 0;
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
- if (!list_empty(&mfd->pipes_used)) {
- list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- if (pipe->ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(pipe->ndx);
- if (rot)
- mdss_mdp_rotator_finish(rot);
- } else {
- unset_ndx |= pipe->ndx;
- cnt++;
- }
- }
+ list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+ unset_ndx |= pipe->ndx;
+ cnt++;
}
mutex_unlock(&mfd->lock);
if (unset_ndx) {
pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
- mdss_mdp_overlay_unset(mfd, unset_ndx);
- mdss_mdp_overlay_kickoff(mfd->ctl);
+ mdss_mdp_overlay_release(mfd, unset_ndx);
}
+ mutex_unlock(&mfd->ov_lock);
+
+ if (cnt)
+ mdss_mdp_overlay_kickoff(mfd->ctl);
return 0;
}
@@ -562,6 +585,12 @@
struct mdss_mdp_data src_data, dst_data;
int ret;
+ rot = mdss_mdp_rotator_session_get(req->id);
+ if (!rot) {
+ pr_err("invalid session id=%x\n", req->id);
+ return -ENOENT;
+ }
+
ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1);
if (ret) {
pr_err("src_data pmem error\n");
@@ -574,13 +603,6 @@
goto rotate_done;
}
- rot = mdss_mdp_rotator_session_get(req->id);
- if (!rot) {
- pr_err("invalid session id=%x\n", req->id);
- ret = -ENODEV;
- goto rotate_done;
- }
-
ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
if (ret) {
pr_err("rotator queue error session id=%x\n", req->id);
@@ -625,9 +647,6 @@
ctl = pipe->mixer->ctl;
mdss_mdp_pipe_unlock(pipe);
- if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL))
- ret = mdss_mdp_overlay_kickoff(ctl);
-
return ret;
}
@@ -638,14 +657,29 @@
pr_debug("play req id=%x\n", req->id);
- if (!mfd->panel_power_on)
- return -EPERM;
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
- if (req->id & MDSS_MDP_ROT_SESSION_MASK)
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
ret = mdss_mdp_overlay_rotate(mfd, req);
- else
+ } else {
ret = mdss_mdp_overlay_queue(mfd, req);
+ if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL)) {
+ mutex_unlock(&mfd->ov_lock);
+ ret = mdss_mdp_overlay_kickoff(mfd->ctl);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -719,10 +753,7 @@
u32 offset;
int bpp, ret;
- if (!mfd)
- return;
-
- if (!mfd->ctl || !mfd->panel_power_on)
+ if (!mfd || !mfd->ctl)
return;
fbi = mfd->fbi;
@@ -732,6 +763,14 @@
return;
}
+ if (mutex_lock_interruptible(&mfd->ov_lock))
+ return;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return;
+ }
+
memset(&data, 0, sizeof(data));
bpp = fbi->var.bits_per_pixel / 8;
@@ -782,6 +821,7 @@
return;
}
}
+ mutex_unlock(&mfd->ov_lock);
if (fbi->var.activate & FB_ACTIVATE_VBL)
mdss_mdp_overlay_kickoff(mfd->ctl);
@@ -1076,10 +1116,36 @@
return ret;
}
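+/*
+ * Overlay on/off wrappers: ov_active_panels counts panels with the
+ * overlay path active; when the last one is switched off, leftover
+ * rotator sessions are reclaimed via mdss_mdp_rotator_release_all().
+ */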
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ rc = mdss_mdp_ctl_on(mfd);
+ if (rc == 0)
+ atomic_inc(&ov_active_panels);
+
+ return rc;
+}
+
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ mdss_mdp_overlay_release_all(mfd);
+
+ rc = mdss_mdp_ctl_off(mfd);
+ if (rc == 0) {
+ if (atomic_dec_return(&ov_active_panels) == 0)
+ mdss_mdp_rotator_release_all();
+ }
+
+ return rc;
+}
+
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
- mfd->on_fnc = mdss_mdp_ctl_on;
- mfd->off_fnc = mdss_mdp_ctl_off;
+ mfd->on_fnc = mdss_mdp_overlay_on;
+ mfd->off_fnc = mdss_mdp_overlay_off;
mfd->hw_refresh = true;
mfd->do_histogram = NULL;
mfd->overlay_play_enable = true;
@@ -1092,6 +1158,7 @@
INIT_LIST_HEAD(&mfd->pipes_used);
INIT_LIST_HEAD(&mfd->pipes_cleanup);
+ mutex_init(&mfd->ov_lock);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 3b04633..042b5e9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -268,9 +268,7 @@
atomic_read(&pipe->ref_cnt));
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- mutex_lock(&mdss_mdp_sspp_lock);
mdss_mdp_pipe_free(pipe);
- mutex_unlock(&mdss_mdp_sspp_lock);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return 0;
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index dc1cb0d..647fddc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,12 +187,17 @@
{
struct mdss_mdp_pipe *rot_pipe;
struct mdss_mdp_ctl *ctl;
- int ret;
+ int ret, need_wait = false;
- if (!rot)
+ ret = mutex_lock_interruptible(&rotator_lock);
+ if (ret)
+ return ret;
+
+ if (!rot || !rot->ref_cnt) {
+ mutex_unlock(&rotator_lock);
return -ENODEV;
+ }
- mutex_lock(&rotator_lock);
ret = mdss_mdp_rotator_pipe_dequeue(rot);
if (ret) {
pr_err("unable to acquire rotator\n");
@@ -225,16 +230,18 @@
ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
+ if (ret == 0 && !rot->no_wait)
+ need_wait = true;
done:
mutex_unlock(&rotator_lock);
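+ /* wait outside rotator_lock, and only if the kickoff was queued */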
- if (!rot->no_wait)
+ if (need_wait)
mdss_mdp_rotator_busy_wait(rot);
return ret;
}
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
{
struct mdss_mdp_pipe *rot_pipe;
@@ -243,7 +250,6 @@
pr_debug("finish rot id=%x\n", rot->session_id);
- mutex_lock(&rotator_lock);
rot_pipe = rot->pipe;
if (rot_pipe) {
mdss_mdp_rotator_busy_wait(rot);
@@ -255,7 +261,43 @@
mdss_mdp_pipe_destroy(rot_pipe);
mdss_mdp_wb_mixer_destroy(mixer);
}
+
+ return 0;
+}
+
+int mdss_mdp_rotator_release(u32 ndx)
+{
+ struct mdss_mdp_rotator_session *rot;
+ mutex_lock(&rotator_lock);
+ rot = mdss_mdp_rotator_session_get(ndx);
+ if (rot) {
+ mdss_mdp_rotator_finish(rot);
+ } else {
+ pr_warn("unknown session id=%x\n", ndx);
+ return -ENOENT;
+ }
mutex_unlock(&rotator_lock);
return 0;
}
+
+int mdss_mdp_rotator_release_all(void)
+{
+ struct mdss_mdp_rotator_session *rot;
+ int i, cnt;
+
+ mutex_lock(&rotator_lock);
+ for (i = 0, cnt = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+ rot = &rotator_session[i];
+ if (rot->ref_cnt) {
+ mdss_mdp_rotator_finish(rot);
+ cnt++;
+ }
+ }
+ mutex_unlock(&rotator_lock);
+
+ if (cnt)
+ pr_debug("cleaned up %d rotator sessions\n", cnt);
+
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index eb5b47a..cc4e339 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -48,7 +48,8 @@
int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
-int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release_all(void);
#endif /* MDSS_MDP_ROTATOR_H */
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 5cdfe34..28d7051 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -55,6 +55,17 @@
MAX_PHYS_TARGET_NUM,
};
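+/*
+ * Interface events dispatched to the panel driver's event_handler:
+ * UNBLANK/TIMEGEN_ON as the timing generator comes up,
+ * BLANK/TIMEGEN_OFF on the way down, and CLOSE once the last
+ * reference to the interface is dropped (see the mdss_mdp_intf_video
+ * and overlay changes above).
+ */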
+enum mdss_intf_events {
+ MDSS_EVENT_RESET,
+ MDSS_EVENT_UNBLANK,
+ MDSS_EVENT_TIMEGEN_ON,
+ MDSS_EVENT_BLANK,
+ MDSS_EVENT_TIMEGEN_OFF,
+ MDSS_EVENT_CLOSE,
+ MDSS_EVENT_SUSPEND,
+ MDSS_EVENT_RESUME,
+};
+
/* panel info type */
struct lcd_panel_info {
u32 vsync_enable;
@@ -178,14 +189,11 @@
struct mdss_panel_data {
struct mdss_panel_info panel_info;
- void (*set_backlight) (struct mdss_panel_data *pdata,
- u32 bl_level);
- int (*intf_unprepare) (struct mdss_panel_data *pdata);
+ void (*set_backlight) (struct mdss_panel_data *pdata, u32 bl_level);
unsigned char *mmss_cc_base;
/* function entry chain */
- int (*on) (struct mdss_panel_data *pdata);
- int (*off) (struct mdss_panel_data *pdata);
+ int (*event_handler) (struct mdss_panel_data *pdata, int e, void *arg);
};
int mdss_register_panel(struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index d4c924f..c3dc06b 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,15 +25,10 @@
#include "mdss_panel.h"
-static int mdss_wb_on(struct mdss_panel_data *pdata)
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
{
- pr_debug("%s\n", __func__);
- return 0;
-}
-
-static int mdss_wb_off(struct mdss_panel_data *pdata)
-{
- pr_debug("%s\n", __func__);
+ pr_debug("%s: event=%d\n", __func__, event);
return 0;
}
@@ -75,8 +70,7 @@
pdata->panel_info.pdest = DISPLAY_3;
pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
- pdata->on = mdss_wb_on;
- pdata->off = mdss_wb_off;
+ pdata->event_handler = mdss_wb_event_handler;
pdev->dev.platform_data = pdata;
rc = mdss_register_panel(pdata);
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
new file mode 100644
index 0000000..6a63964
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -0,0 +1,1184 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/mhl_8334.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_io_util.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+
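+/* note: pr_debug_intr discards its arguments and prints only a bare newline */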
+#define pr_debug_intr(...) pr_debug("\n")
+
+enum mhl_gpio_type {
+ MHL_TX_RESET_GPIO,
+ MHL_TX_INTR_GPIO,
+ MHL_TX_PMIC_PWR_GPIO,
+ MHL_TX_MAX_GPIO,
+};
+
+enum mhl_vreg_type {
+ MHL_TX_3V_VREG,
+ MHL_TX_MAX_VREG,
+};
+
+struct mhl_tx_platform_data {
+ /* Data filled from device tree nodes */
+ struct dss_gpio *gpios[MHL_TX_MAX_GPIO];
+ struct dss_vreg *vregs[MHL_TX_MAX_VREG];
+ int irq;
+};
+
+struct mhl_tx_ctrl {
+ struct platform_device *pdev;
+ struct mhl_tx_platform_data *pdata;
+ struct i2c_client *i2c_handle;
+ uint8_t cur_state;
+ uint8_t chip_rev_id;
+ int mhl_mode;
+};
+
+
+uint8_t slave_addrs[MAX_PAGES] = {
+ DEV_PAGE_TPI_0,
+ DEV_PAGE_TX_L0_0,
+ DEV_PAGE_TX_L1_0,
+ DEV_PAGE_TX_2_0,
+ DEV_PAGE_TX_3_0,
+ DEV_PAGE_CBUS,
+ DEV_PAGE_DDC_EDID,
+ DEV_PAGE_DDC_SEGM,
+};
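+
+/*
+ * slave_addrs[] maps each register-page index (TX_PAGE_TPI,
+ * TX_PAGE_L0, ..., TX_PAGE_CBUS and the DDC pages) to the i2c
+ * slave address serving that page; the register access helpers
+ * below index into it.
+ */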
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+ enum mhl_st_type to_mode);
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl,
+ uint8_t to_state);
+
+static int mhl_i2c_reg_read(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset)
+{
+ int rc = -1;
+ uint8_t buffer = 0;
+
+ rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+ reg_offset, &buffer);
+ if (rc) {
+ pr_err("%s: slave=%x, off=%x\n",
+ __func__, slave_addrs[slave_addr_index], reg_offset);
+ return rc;
+ }
+ return buffer;
+}
+
+
+static int mhl_i2c_reg_write(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset,
+ uint8_t value)
+{
+ return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+ reg_offset, &value);
+}
+
+static void mhl_i2c_reg_modify(struct i2c_client *client,
+ uint8_t slave_addr_index, uint8_t reg_offset,
+ uint8_t mask, uint8_t val)
+{
+ uint8_t temp;
+
+ temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);
+ temp &= (~mask);
+ temp |= (mask & val);
+ mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
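+
+/*
+ * Illustrative read-modify-write (this is what
+ * MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4) expands to):
+ *
+ * mhl_i2c_reg_modify(client, TX_PAGE_3, 0x0020, BIT4, BIT4);
+ *
+ * the helper reads the register, clears the masked bits and ORs
+ * in the new value before writing it back.
+ */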
+
+
+static int mhl_tx_get_dt_data(struct device *dev,
+ struct mhl_tx_platform_data *pdata)
+{
+ int i = 0, rc = 0;
+ struct device_node *of_node = NULL;
+ struct dss_gpio *temp_gpio = NULL;
+
+ if (!dev || !pdata) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ of_node = dev->of_node;
+ if (!of_node) {
+ pr_err("%s: invalid of_node\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("%s: id=%d\n", __func__, dev->id);
+
+ /* GPIOs */
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!temp_gpio) {
+ pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+ rc = -ENOMEM;
+ goto error;
+ }
+ /* RESET */
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-rst-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-rst-gpio");
+ pr_debug("%s: rst gpio=[%d]\n", __func__,
+ temp_gpio->gpio);
+ pdata->gpios[MHL_TX_RESET_GPIO] = temp_gpio;
+
+ /* PWR */
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!temp_gpio) {
+ pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+ rc = -ENOMEM;
+ goto error;
+ }
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-pwr-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-pwr-gpio");
+ pr_debug("%s: pmic gpio=[%d]\n", __func__,
+ temp_gpio->gpio);
+ pdata->gpios[MHL_TX_PMIC_PWR_GPIO] = temp_gpio;
+
+ /* INTR */
+ temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+ pr_debug("%s: gpios allocd\n", __func__);
+ if (!temp_gpio) {
+ pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+ rc = -ENOMEM;
+ goto error;
+ }
+ temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-intr-gpio", 0);
+ snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-intr-gpio");
+ pr_debug("%s: intr gpio=[%d]\n", __func__,
+ temp_gpio->gpio);
+ pdata->gpios[MHL_TX_INTR_GPIO] = temp_gpio;
+
+ return 0;
+error:
+ pr_err("%s: ret due to err\n", __func__);
+ for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+ if (pdata->gpios[i])
+ devm_kfree(dev, pdata->gpios[i]);
+ return rc;
+} /* mhl_tx_get_dt_data */
+
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+ gpio_set_value(mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]->gpio,
+ on);
+ return 0;
+}
+
+static void cbus_reset(struct i2c_client *client)
+{
+ uint8_t i;
+
+ /*
+ * REG_SRST
+ */
+ MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);
+ msleep(20);
+ MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);
+ /*
+ * REG_INTR1 and REG_INTR4
+ */
+ MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+ MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+ BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+ MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+ /* Unmask CBUS1 Intrs */
+ MHL_SII_CBUS_WR(0x0009,
+ BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+ /* Unmask CBUS2 Intrs */
+ MHL_SII_CBUS_WR(0x001F, BIT2 | BIT3);
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * Enable WRITE_STAT interrupt for writes to
+ * all 4 MSC Status registers.
+ */
+ MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+ /*
+ * Enable SET_INT interrupt for writes to
+ * all 4 MSC Interrupt registers.
+ */
+ MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+ }
+ return;
+}
+
+static void init_cbus_regs(struct i2c_client *client)
+{
+ uint8_t regval;
+
+ /* Increase DDC translation layer timer */
+ MHL_SII_CBUS_WR(0x0007, 0xF2);
+ /* Drive High Time */
+ MHL_SII_CBUS_WR(0x0036, 0x03);
+ /* Use programmed timing */
+ MHL_SII_CBUS_WR(0x0039, 0x30);
+ /* CBUS Drive Strength */
+ MHL_SII_CBUS_WR(0x0040, 0x03);
+ /*
+ * Write initial default settings
+ * to devcap regs: default settings
+ */
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+ MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+ DEVCAP_VAL_VID_LINK_MODE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_AUD_LINK_MODE,
+ DEVCAP_VAL_AUD_LINK_MODE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+ DEVCAP_VAL_SCRATCHPAD_SIZE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_INT_STAT_SIZE,
+ DEVCAP_VAL_INT_STAT_SIZE);
+ MHL_SII_CBUS_WR(0x0080 |
+ DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+ /*
+ * Set bits 2,3 (initiator timeout) to 1,1
+ * in REG_CBUS_LINK_CONTROL_2
+ */
+ regval = MHL_SII_CBUS_RD(0x0031);
+ regval = (regval | 0x0C);
+ /* REG_CBUS_LINK_CONTROL_2 */
+ MHL_SII_CBUS_WR(0x0031, regval);
+ /* REG_MSC_TIMEOUT_LIMIT */
+ MHL_SII_CBUS_WR(0x0022, 0x0F);
+ /* REG_CBUS_LINK_CONTROL_1 */
+ MHL_SII_CBUS_WR(0x0030, 0x01);
+ /* disallow vendor specific commands */
+ MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Configure the initial reg settings
+ */
+static void mhl_init_reg_settings(struct i2c_client *client, bool mhl_disc_en)
+{
+ /*
+ * ============================================
+ * POWER UP
+ * ============================================
+ */
+
+ /* Power up 1.2V core */
+ MHL_SII_PAGE1_WR(0x003D, 0x3F);
+ /*
+ * Wait for the source power to be enabled
+ * before enabling pll clocks.
+ */
+ msleep(50);
+ /* Enable Tx PLL Clock */
+ MHL_SII_PAGE2_WR(0x0011, 0x01);
+ /* Enable Tx Clock Path and Equalizer */
+ MHL_SII_PAGE2_WR(0x0012, 0x11);
+ /* Tx Source Termination ON */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+ /* Enable 1X MHL Clock output */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xAC);
+ /* Tx Differential Driver Config */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xD9);
+ /* PLL Bandwidth Control */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x02);
+ /*
+ * ============================================
+ * Analog PLL Control
+ * ============================================
+ */
+ /* Enable Rx PLL clock */
+ MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL, 0x00);
+ MHL_SII_PAGE0_WR(0x00F8, 0x0C);
+ MHL_SII_PAGE0_WR(0x0085, 0x02);
+ MHL_SII_PAGE2_WR(0x0000, 0x00);
+ MHL_SII_PAGE2_WR(0x0013, 0x60);
+ /* PLL Cal ref sel */
+ MHL_SII_PAGE2_WR(0x0017, 0x03);
+ /* VCO Cal */
+ MHL_SII_PAGE2_WR(0x001A, 0x20);
+ /* Auto EQ */
+ MHL_SII_PAGE2_WR(0x0022, 0xE0);
+ MHL_SII_PAGE2_WR(0x0023, 0xC0);
+ MHL_SII_PAGE2_WR(0x0024, 0xA0);
+ MHL_SII_PAGE2_WR(0x0025, 0x80);
+ MHL_SII_PAGE2_WR(0x0026, 0x60);
+ MHL_SII_PAGE2_WR(0x0027, 0x40);
+ MHL_SII_PAGE2_WR(0x0028, 0x20);
+ MHL_SII_PAGE2_WR(0x0029, 0x00);
+ /* Rx PLL Bandwidth 4MHz */
+ MHL_SII_PAGE2_WR(0x0031, 0x0A);
+ /* Rx PLL Bandwidth value from I2C */
+ MHL_SII_PAGE2_WR(0x0045, 0x06);
+ MHL_SII_PAGE2_WR(0x004B, 0x06);
+ /* Manual zone control */
+ MHL_SII_PAGE2_WR(0x004C, 0xE0);
+ /* PLL Mode value */
+ MHL_SII_PAGE2_WR(0x004D, 0x00);
+ MHL_SII_PAGE0_WR(0x0008, 0x35);
+ /*
+ * Discovery Control and Status regs
+ * Setting De-glitch time to 50 ms (default)
+ * Switch Control Disabled
+ */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+ /* 1.8V CBUS VTH */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x55);
+ /* RGND and single Discovery attempt */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+ /* Ignore VBUS */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+
+ /* Enable CBUS Discovery */
+ if (mhl_disc_en) {
+ /* Enable MHL Discovery */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+ /* Pull-up resistance off for IDLE state */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0xA4);
+ } else {
+ /* Disable MHL Discovery */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+ }
+
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+ /* MHL CBUS Discovery - immediate comm. */
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+
+ /* Enable Auto Soft RESET */
+ MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+ /* HDMI Transcode mode enable */
+ MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+ cbus_reset(client);
+ init_cbus_regs(client);
+}
+
+
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ switch (to_mode) {
+ case POWER_STATE_D0_NO_MHL:
+ break;
+ case POWER_STATE_D0_MHL:
+ mhl_init_reg_settings(client, true);
+ /* REG_DISC_CTRL1 */
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+ /* TPI_DEVICE_POWER_STATE_CTRL_REG */
+ mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+ 0x00);
+ break;
+ case POWER_STATE_D3:
+ if (mhl_ctrl->cur_state == POWER_STATE_D3)
+ break;
+
+ /* Force HPD to 0 when not in MHL mode. */
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+ /*
+ * Change TMDS termination to high impedance
+ * on disconnection.
+ */
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+ msleep(50);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+ MHL_SII_PAGE3_MOD(0x003D, BIT0, 0x00);
+ mhl_ctrl->cur_state = POWER_STATE_D3;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+ if (to_state == HPD_UP) {
+ /*
+ * Drive HPD to UP state
+ *
+ * The below two reg configs combined
+ * enable TMDS output.
+ */
+
+ /* Enable TMDS on TMDS_CCTRL */
+ MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+
+ /*
+ * Set HPD_OUT_OVR_EN = HPD state:
+ * allow the EDID read and un-force HPD (from low);
+ * propagate to source and let HPD float by
+ * clearing HPD_OUT_OVR_EN.
+ */
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0x00);
+ } else {
+ /*
+ * Drive HPD to DOWN state
+ * Disable TMDS Output on REG_TMDS_CCTRL
+ * Enable/Disable TMDS output (MHL TMDS output only)
+ */
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4);
+ MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+ }
+ return;
+}
+
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t val;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ pr_debug("%s: cur st [0x%x]\n", __func__,
+ mhl_ctrl->cur_state);
+
+ if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+ /* Already in D0 - MHL power state */
+ pr_err("%s: cur st not D0\n", __func__);
+ return;
+ }
+ /* spin_lock_irqsave(&mhl_state_lock, flags); */
+ mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+ /* spin_unlock_irqrestore(&mhl_state_lock, flags); */
+
+ MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+ MHL_SII_CBUS_WR(0x07, 0xF2);
+
+ /*
+ * Keep the discovery enabled. Need RGND interrupt
+ * Possibly chip disables discovery after MHL_EST??
+ * Need to re-enable here
+ */
+ val = MHL_SII_PAGE3_RD(0x10);
+ MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+ return;
+}
+
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ /*
+ * MHL TX CTL1
+ * Disabling Tx termination
+ */
+ MHL_SII_PAGE3_WR(0x30, 0xD0);
+
+ switch_mode(mhl_ctrl, POWER_STATE_D3);
+ /*
+ * Only if MHL-USB handshake is not implemented
+ */
+ mhl_init_reg_settings(client, true);
+ return;
+}
+
+static int mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t rgnd_imp;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ /* DISC STATUS REG 2 */
+ rgnd_imp = (mhl_i2c_reg_read(client,
+ TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
+ pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
+ if (0x02 == rgnd_imp) {
+ pr_debug("%s: mhl sink\n", __func__);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT0, BIT0);
+ mhl_ctrl->mhl_mode = 1;
+ } else {
+ pr_debug("%s: non-mhl sink\n", __func__);
+ mhl_ctrl->mhl_mode = 0;
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT3, BIT3);
+ switch_mode(mhl_ctrl, POWER_STATE_D3);
+ }
+ return mhl_ctrl->mhl_mode ?
+ MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /*disable discovery*/
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+ /* force USB ID switch to open*/
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+ /* force HPD to 0 when not in mhl mode. */
+ MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ msleep(50);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+ MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+static void scdt_st_chg(struct i2c_client *client)
+{
+ uint8_t tmds_cstat;
+ uint8_t mhl_fifo_status;
+
+ /* tmds cstat */
+ tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+ pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+ tmds_cstat);
+
+ if (!(tmds_cstat & BIT1))
+ return;
+
+ mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+ pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+ mhl_fifo_status);
+ if (mhl_fifo_status & 0x0C) {
+ MHL_SII_REG_NAME_WR(REG_INTR5, 0x0C);
+ pr_debug("%s: mhl fifo rst\n", __func__);
+ MHL_SII_REG_NAME_WR(REG_SRST, 0x94);
+ MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+ }
+}
+
+
+static void dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t status, reg;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* INTR_STATUS4 */
+ status = MHL_SII_REG_NAME_RD(REG_INTR4);
+ pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
+ if ((0x00 == status) &&
+ (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+ pr_err("%s: invalid intr\n", __func__);
+ return;
+ }
+
+ if (0xFF == status) {
+ pr_debug("%s: invalid intr 0xff\n", __func__);
+ MHL_SII_REG_NAME_WR(REG_INTR4, status);
+ return;
+ }
+
+ if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+ pr_debug("%s: scdt intr\n", __func__);
+ scdt_st_chg(client);
+ }
+
+ if (status & BIT1)
+ pr_debug("mhl: int4 bit1 set\n");
+
+ /* mhl_est interrupt */
+ if (status & BIT2) {
+ pr_debug("%s: mhl_est st=%02X\n", __func__,
+ (int) status);
+ mhl_msm_connection(mhl_ctrl);
+ } else if (status & BIT3) {
+ pr_debug("%s: uUSB-a type dev detct\n", __func__);
+ MHL_SII_REG_NAME_WR(REG_DISC_STAT2, 0x80);
+ switch_mode(mhl_ctrl, POWER_STATE_D3);
+ }
+
+ if (status & BIT5) {
+ /* clr intr - reg int4 */
+ pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+ (int)status);
+ reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+ MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+ mhl_msm_disconnection(mhl_ctrl);
+ }
+
+ if ((mhl_ctrl->cur_state != POWER_STATE_D0_MHL) &&
+ (status & BIT6)) {
+ /* rgnd rdy Intr */
+ pr_debug("%s: rgnd ready intr\n", __func__);
+ switch_mode(mhl_ctrl, POWER_STATE_D0_MHL);
+ mhl_msm_read_rgnd_int(mhl_ctrl);
+ }
+
+ /* Can't succeed at these in D3 */
+ if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&
+ (status & BIT4)) {
+ /*
+ * CBUS lockout interrupt:
+ * the hardware detection mechanism senses that the
+ * CBUS line is latched, and raises this intr; in
+ * response we force the USB switch open and release it.
+ */
+ pr_warn("%s: cbus locked out!\n", __func__);
+ force_usb_switch_open(mhl_ctrl);
+ release_usb_switch_open(mhl_ctrl);
+ }
+ MHL_SII_REG_NAME_WR(REG_INTR4, status);
+
+ return;
+}
+
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t intr_5_stat;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /*
+ * Clear INT 5
+ * INTR5 is related to FIFO underflow/overflow reset
+ * which is handled in 8334 by auto FIFO reset
+ */
+ intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+ MHL_SII_REG_NAME_WR(REG_INTR5, intr_5_stat);
+}
+
+
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t intr_1_stat;
+ uint8_t cbus_stat;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* INTR STATUS 1 */
+ intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+ if (!intr_1_stat)
+ return;
+
+ /* Clear interrupts */
+ MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+ if (BIT6 & intr_1_stat) {
+ /*
+ * HPD status change event is pending
+ * Read CBUS HPD status for this info
+ * MSC REQ ABRT REASON
+ */
+ cbus_stat = MHL_SII_CBUS_RD(0x0D);
+ if (BIT6 & cbus_stat)
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ }
+ return;
+}
+
+static void clear_all_intrs(struct i2c_client *client)
+{
+ uint8_t regval = 0x00;
+
+ pr_debug_intr("********* exiting isr mask check ?? *************\n");
+ pr_debug_intr("int1 mask = %02X\n",
+ (int) MHL_SII_REG_NAME_RD(REG_INTR1));
+ pr_debug_intr("int3 mask = %02X\n",
+ (int) MHL_SII_PAGE0_RD(0x0077));
+ pr_debug_intr("int4 mask = %02X\n",
+ (int) MHL_SII_REG_NAME_RD(REG_INTR4));
+ pr_debug_intr("int5 mask = %02X\n",
+ (int) MHL_SII_REG_NAME_RD(REG_INTR5));
+ pr_debug_intr("cbus1 mask = %02X\n",
+ (int) MHL_SII_CBUS_RD(0x0009));
+ pr_debug_intr("cbus2 mask = %02X\n",
+ (int) MHL_SII_CBUS_RD(0x001F));
+ pr_debug_intr("********* end of isr mask check *************\n");
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR1);
+ pr_debug_intr("int1 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR1, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR2);
+ pr_debug_intr("int2 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR2, regval);
+
+ regval = MHL_SII_PAGE0_RD(0x0073);
+ pr_debug_intr("int3 st = %02X\n", (int)regval);
+ MHL_SII_PAGE0_WR(0x0073, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR4);
+ pr_debug_intr("int4 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR4, regval);
+
+ regval = MHL_SII_REG_NAME_RD(REG_INTR5);
+ pr_debug_intr("int5 st = %02X\n", (int)regval);
+ MHL_SII_REG_NAME_WR(REG_INTR5, regval);
+
+ regval = MHL_SII_CBUS_RD(0x0008);
+ pr_debug_intr("cbusInt st = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x0008, regval);
+
+ regval = MHL_SII_CBUS_RD(0x001E);
+ pr_debug_intr("CBUS intR_2: %d\n", (int)regval);
+ MHL_SII_CBUS_WR(0x001E, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A0);
+ pr_debug_intr("A0 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A1);
+ pr_debug_intr("A1 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A2);
+ pr_debug_intr("A2 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00A3);
+ pr_debug_intr("A3 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00A3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B0);
+ pr_debug_intr("B0 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B1);
+ pr_debug_intr("B1 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B2);
+ pr_debug_intr("B2 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00B3);
+ pr_debug_intr("B3 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00B3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E0);
+ pr_debug_intr("E0 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E1);
+ pr_debug_intr("E1 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E2);
+ pr_debug_intr("E2 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00E3);
+ pr_debug_intr("E3 st set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00E3, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F0);
+ pr_debug_intr("F0 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F0, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F1);
+ pr_debug_intr("F1 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F1, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F2);
+ pr_debug_intr("F2 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F2, regval);
+
+ regval = MHL_SII_CBUS_RD(0x00F3);
+ pr_debug_intr("F3 int set = %02X\n", (int)regval);
+ MHL_SII_CBUS_WR(0x00F3, regval);
+ pr_debug_intr("********* end of exiting in isr *************\n");
+}
+
+
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+ struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+ pr_debug("%s: Getting Interrupts\n", __func__);
+
+ /*
+ * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+ * interrupts. In D3, we get only RGND
+ */
+ dev_detect_isr(mhl_ctrl);
+
+ pr_debug("%s: cur pwr state is [0x%x]\n",
+ __func__, mhl_ctrl->cur_state);
+ if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+ /*
+ * If dev_detect_isr() didn't move the tx to D3
+ * on disconnect, continue to check other
+ * interrupt sources.
+ */
+ mhl_misc_isr(mhl_ctrl);
+
+ /*
+ * Check for any peer messages (DCAP_CHG etc.) and
+ * dispatch to the CBUS module only once connected;
+ * left disabled here:
+ * mhl_cbus_isr(mhl_ctrl);
+ */
+ mhl_hpd_stat_isr(mhl_ctrl);
+ }
+
+ clear_all_intrs(mhl_ctrl->i2c_handle);
+
+ return IRQ_HANDLED;
+}
+
+static int mhl_tx_chip_init(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ uint8_t chip_rev_id = 0x00;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ /* Reset the TX chip */
+ mhl_sii_reset_pin(mhl_ctrl, 0);
+ msleep(20);
+ mhl_sii_reset_pin(mhl_ctrl, 1);
+ /* TX PR-guide requires a 100 ms wait here */
+ msleep(100);
+
+ /* Read and cache the chip rev ID */
+ chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+ mhl_ctrl->chip_rev_id = chip_rev_id;
+ pr_debug("MHL: chip rev ID read=[%x]\n", chip_rev_id);
+
+ /*
+ * Need to disable MHL discovery if
+ * MHL-USB handshake is implemented
+ */
+ mhl_init_reg_settings(client, true);
+ return 0;
+}
+
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+ static struct regulator *reg_8941_l24;
+ static struct regulator *reg_8941_l02;
+ int rc = 0;
+
+ pr_debug("Inside %s\n", __func__);
+ if (!reg_8941_l24) {
+ reg_8941_l24 = regulator_get(&client->dev,
+ "avcc_18");
+ if (IS_ERR(reg_8941_l24)) {
+ pr_err("could not get reg_8038_l20, rc = %ld\n",
+ PTR_ERR(reg_8941_l24));
+ return -ENODEV;
+ }
+ if (enable)
+ rc = regulator_enable(reg_8941_l24);
+ else
+ rc = regulator_disable(reg_8941_l24);
+ if (rc) {
+ pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+ "avcc_1.8V", enable, rc);
+ return rc;
+ } else {
+ pr_debug("%s: vreg L24 %s\n",
+ __func__, (enable ? "enabled" : "disabled"));
+ }
+ }
+
+ if (!reg_8941_l02) {
+ reg_8941_l02 = regulator_get(&client->dev,
+ "avcc_12");
+ if (IS_ERR(reg_8941_l02)) {
+ pr_err("could not get reg_8941_l02, rc = %ld\n",
+ PTR_ERR(reg_8941_l02));
+ return -ENODEV;
+ }
+ if (enable)
+ rc = regulator_enable(reg_8941_l02);
+ else
+ rc = regulator_disable(reg_8941_l02);
+ if (rc) {
+ pr_debug("'%s' regulator configure[%u] failed, rc=%d\n",
+ "avcc_1.2V", enable, rc);
+ return rc;
+ } else {
+ pr_debug("%s: vreg L02 %s\n",
+ __func__, (enable ? "enabled" : "disabled"));
+ }
+ }
+
+ return rc;
+}
+
+
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+ int ret;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+ int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+ pr_debug("%s\n", __func__);
+ if (on) {
+ ret = gpio_request(pwr_gpio,
+ mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+ if (ret < 0) {
+ pr_err("%s: mhl pwr gpio req failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = gpio_direction_output(pwr_gpio, 1);
+ if (ret < 0) {
+ pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = mhl_sii_reg_config(client, true);
+ if (ret) {
+ pr_err("%s: regulator enable failed\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: mhl sii power on successful\n", __func__);
+ } else {
+ pr_warn("%s: turning off pwr controls\n", __func__);
+ mhl_sii_reg_config(client, false);
+ gpio_free(pwr_gpio);
+ }
+ pr_debug("%s: successful\n", __func__);
+ return 0;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+ int ret;
+ struct dss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+ /* caused too many line spills */
+ temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+ temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+ if (on) {
+ if (gpio_is_valid(temp_reset_gpio->gpio)) {
+ ret = gpio_request(temp_reset_gpio->gpio,
+ temp_reset_gpio->gpio_name);
+ if (ret < 0) {
+ pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+ __func__, temp_reset_gpio->gpio, ret);
+ return -EBUSY;
+ }
+ ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+ if (ret < 0) {
+ pr_err("%s: set dirn rst failed: %d\n",
+ __func__, ret);
+ return -EBUSY;
+ }
+ }
+ if (gpio_is_valid(temp_intr_gpio->gpio)) {
+ ret = gpio_request(temp_intr_gpio->gpio,
+ temp_intr_gpio->gpio_name);
+ if (ret < 0) {
+ pr_err("%s: intr_gpio req failed: %d\n",
+ __func__, ret);
+ return -EBUSY;
+ }
+ ret = gpio_direction_input(temp_intr_gpio->gpio);
+ if (ret < 0) {
+ pr_err("%s: set dirn intr failed: %d\n",
+ __func__, ret);
+ return -EBUSY;
+ }
+ mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+ temp_intr_gpio->gpio);
+ pr_debug("%s: gpio_to_irq=%d\n",
+ __func__, mhl_ctrl->i2c_handle->irq);
+ }
+ } else {
+ pr_warn("%s: freeing gpios\n", __func__);
+ gpio_free(temp_intr_gpio->gpio);
+ gpio_free(temp_reset_gpio->gpio);
+ }
+ pr_debug("%s: successful\n", __func__);
+ return 0;
+}
+
+static int mhl_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct mhl_tx_platform_data *pdata = NULL;
+ struct mhl_tx_ctrl *mhl_ctrl;
+
+ mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+ if (!mhl_ctrl) {
+ pr_err("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+ rc = -ENOMEM;
+ goto failed_no_mem;
+ }
+
+ if (client->dev.of_node) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto failed_no_mem;
+ }
+
+ rc = mhl_tx_get_dt_data(&client->dev, pdata);
+ if (rc) {
+ pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+ __func__, rc);
+ goto failed_dt_data;
+ }
+ mhl_ctrl->i2c_handle = client;
+ mhl_ctrl->pdata = pdata;
+ i2c_set_clientdata(client, mhl_ctrl);
+ }
+
+ /*
+ * Regulator init
+ */
+ rc = mhl_vreg_config(mhl_ctrl, 1);
+ if (rc) {
+ pr_err("%s: vreg init failed [%d]\n",
+ __func__, rc);
+ goto failed_probe;
+ }
+
+ /*
+ * GPIO init
+ */
+ rc = mhl_gpio_config(mhl_ctrl, 1);
+ if (rc) {
+ pr_err("%s: gpio init failed [%d]\n",
+ __func__, rc);
+ goto failed_probe;
+ }
+
+ /*
+ * Other initializations
+ * such tx specific
+ */
+ rc = mhl_tx_chip_init(mhl_ctrl);
+ if (rc) {
+ pr_err("%s: tx chip init failed [%d]\n",
+ __func__, rc);
+ goto failed_probe;
+ }
+
+ pr_debug("%s: IRQ from GPIO INTR = %d\n",
+ __func__, mhl_ctrl->i2c_handle->irq);
+ pr_debug("%s: Driver name = [%s]\n", __func__,
+ client->dev.driver->name);
+ rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+ &mhl_tx_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->dev.driver->name, mhl_ctrl);
+ if (rc) {
+ pr_err("request_threaded_irq failed, status: %d\n",
+ rc);
+ goto failed_probe;
+ } else {
+ pr_debug("request_threaded_irq succeeded\n");
+ }
+ pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+ return 0;
+failed_probe:
+failed_dt_data:
+ if (pdata)
+ devm_kfree(&client->dev, pdata);
+failed_no_mem:
+ if (mhl_ctrl)
+ devm_kfree(&client->dev, mhl_ctrl);
+ pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+ return rc;
+}
+
+
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+ struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+ if (!mhl_ctrl) {
+ pr_warn("%s: i2c get client data failed\n", __func__);
+ return -EINVAL;
+ }
+
+ free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+ mhl_gpio_config(mhl_ctrl, 0);
+ mhl_vreg_config(mhl_ctrl, 0);
+ if (mhl_ctrl->pdata)
+ devm_kfree(&client->dev, mhl_ctrl->pdata);
+ devm_kfree(&client->dev, mhl_ctrl);
+ return 0;
+}
+
+static struct i2c_device_id mhl_sii_i2c_id[] = {
+ { MHL_DRIVER_NAME, 0 },
+ { }
+};
+
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+static struct of_device_id mhl_match_table[] = {
+ {.compatible = COMPATIBLE_NAME,},
+ { },
+};
+
+static struct i2c_driver mhl_sii_i2c_driver = {
+ .driver = {
+ .name = MHL_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mhl_match_table,
+ },
+ .probe = mhl_i2c_probe,
+ .remove = mhl_i2c_remove,
+ .id_table = mhl_sii_i2c_id,
+};
+
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index cb9d7fa..c9f57c5 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -130,4 +130,162 @@
DEV_PAGE_DDC_SEGM = (0x60),
};
+#define MHL_SII_PAGE0_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_L0, off)
+#define MHL_SII_PAGE0_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_L0, off, val)
+#define MHL_SII_PAGE0_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_L0, off, mask, val)
+
+
+#define MHL_SII_PAGE1_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_L1, off)
+#define MHL_SII_PAGE1_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_L1, off, val)
+#define MHL_SII_PAGE1_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_L1, off, mask, val)
+
+
+#define MHL_SII_PAGE2_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_2, off)
+#define MHL_SII_PAGE2_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_2, off, val)
+#define MHL_SII_PAGE2_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_2, off, mask, val)
+
+
+#define MHL_SII_PAGE3_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_3, off)
+#define MHL_SII_PAGE3_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_3, off, val)
+#define MHL_SII_PAGE3_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_3, off, mask, val)
+
+#define MHL_SII_CBUS_RD(off) \
+ mhl_i2c_reg_read(client, TX_PAGE_CBUS, off)
+#define MHL_SII_CBUS_WR(off, val) \
+ mhl_i2c_reg_write(client, TX_PAGE_CBUS, off, val)
+#define MHL_SII_CBUS_MOD(off, mask, val) \
+ mhl_i2c_reg_modify(client, TX_PAGE_CBUS, off, mask, val)
+
+#define REG_SRST ((TX_PAGE_3 << 16) | 0x0000)
+#define REG_INTR1 ((TX_PAGE_L0 << 16) | 0x0071)
+#define REG_INTR1_MASK ((TX_PAGE_L0 << 16) | 0x0075)
+#define REG_INTR2 ((TX_PAGE_L0 << 16) | 0x0072)
+#define REG_TMDS_CCTRL ((TX_PAGE_L0 << 16) | 0x0080)
+
+#define REG_DISC_CTRL1 ((TX_PAGE_3 << 16) | 0x0010)
+#define REG_DISC_CTRL2 ((TX_PAGE_3 << 16) | 0x0011)
+#define REG_DISC_CTRL3 ((TX_PAGE_3 << 16) | 0x0012)
+#define REG_DISC_CTRL4 ((TX_PAGE_3 << 16) | 0x0013)
+#define REG_DISC_CTRL5 ((TX_PAGE_3 << 16) | 0x0014)
+#define REG_DISC_CTRL6 ((TX_PAGE_3 << 16) | 0x0015)
+#define REG_DISC_CTRL7 ((TX_PAGE_3 << 16) | 0x0016)
+#define REG_DISC_CTRL8 ((TX_PAGE_3 << 16) | 0x0017)
+#define REG_DISC_CTRL9 ((TX_PAGE_3 << 16) | 0x0018)
+#define REG_DISC_CTRL10 ((TX_PAGE_3 << 16) | 0x0019)
+#define REG_DISC_CTRL11 ((TX_PAGE_3 << 16) | 0x001A)
+#define REG_DISC_STAT ((TX_PAGE_3 << 16) | 0x001B)
+#define REG_DISC_STAT2 ((TX_PAGE_3 << 16) | 0x001C)
+
+#define REG_INT_CTRL ((TX_PAGE_3 << 16) | 0x0020)
+#define REG_INTR4 ((TX_PAGE_3 << 16) | 0x0021)
+#define REG_INTR4_MASK ((TX_PAGE_3 << 16) | 0x0022)
+#define REG_INTR5 ((TX_PAGE_3 << 16) | 0x0023)
+#define REG_INTR5_MASK ((TX_PAGE_3 << 16) | 0x0024)
+
+#define REG_MHLTX_CTL1 ((TX_PAGE_3 << 16) | 0x0030)
+#define REG_MHLTX_CTL2 ((TX_PAGE_3 << 16) | 0x0031)
+#define REG_MHLTX_CTL3 ((TX_PAGE_3 << 16) | 0x0032)
+#define REG_MHLTX_CTL4 ((TX_PAGE_3 << 16) | 0x0033)
+#define REG_MHLTX_CTL5 ((TX_PAGE_3 << 16) | 0x0034)
+#define REG_MHLTX_CTL6 ((TX_PAGE_3 << 16) | 0x0035)
+#define REG_MHLTX_CTL7 ((TX_PAGE_3 << 16) | 0x0036)
+#define REG_MHLTX_CTL8 ((TX_PAGE_3 << 16) | 0x0037)
+
+#define REG_TMDS_CSTAT ((TX_PAGE_3 << 16) | 0x0040)
+
+#define REG_CBUS_INTR_ENABLE ((TX_PAGE_CBUS << 16) | 0x0009)
+
+#define REG_DDC_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000B)
+#define REG_CBUS_BUS_STATUS ((TX_PAGE_CBUS << 16) | 0x000A)
+#define REG_PRI_XFR_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000D)
+#define REG_CBUS_PRI_FWR_ABORT_REASON ((TX_PAGE_CBUS << 16) | 0x000E)
+#define REG_CBUS_PRI_START ((TX_PAGE_CBUS << 16) | 0x0012)
+#define REG_CBUS_PRI_ADDR_CMD ((TX_PAGE_CBUS << 16) | 0x0013)
+#define REG_CBUS_PRI_WR_DATA_1ST ((TX_PAGE_CBUS << 16) | 0x0014)
+#define REG_CBUS_PRI_WR_DATA_2ND ((TX_PAGE_CBUS << 16) | 0x0015)
+#define REG_CBUS_PRI_RD_DATA_1ST ((TX_PAGE_CBUS << 16) | 0x0016)
+#define REG_CBUS_PRI_RD_DATA_2ND ((TX_PAGE_CBUS << 16) | 0x0017)
+#define REG_CBUS_PRI_VS_CMD ((TX_PAGE_CBUS << 16) | 0x0018)
+#define REG_CBUS_PRI_VS_DATA ((TX_PAGE_CBUS << 16) | 0x0019)
+#define REG_CBUS_MSC_RETRY_INTERVAL ((TX_PAGE_CBUS << 16) | 0x001A)
+#define REG_CBUS_DDC_FAIL_LIMIT ((TX_PAGE_CBUS << 16) | 0x001C)
+#define REG_CBUS_MSC_FAIL_LIMIT ((TX_PAGE_CBUS << 16) | 0x001D)
+#define REG_CBUS_MSC_INT2_STATUS ((TX_PAGE_CBUS << 16) | 0x001E)
+#define REG_CBUS_MSC_INT2_ENABLE ((TX_PAGE_CBUS << 16) | 0x001F)
+#define REG_MSC_WRITE_BURST_LEN ((TX_PAGE_CBUS << 16) | 0x0020)
+#define REG_MSC_HEARTBEAT_CONTROL ((TX_PAGE_CBUS << 16) | 0x0021)
+#define REG_MSC_TIMEOUT_LIMIT ((TX_PAGE_CBUS << 16) | 0x0022)
+#define REG_CBUS_LINK_CONTROL_1 ((TX_PAGE_CBUS << 16) | 0x0030)
+#define REG_CBUS_LINK_CONTROL_2 ((TX_PAGE_CBUS << 16) | 0x0031)
+#define REG_CBUS_LINK_CONTROL_3 ((TX_PAGE_CBUS << 16) | 0x0032)
+#define REG_CBUS_LINK_CONTROL_4 ((TX_PAGE_CBUS << 16) | 0x0033)
+#define REG_CBUS_LINK_CONTROL_5 ((TX_PAGE_CBUS << 16) | 0x0034)
+#define REG_CBUS_LINK_CONTROL_6 ((TX_PAGE_CBUS << 16) | 0x0035)
+#define REG_CBUS_LINK_CONTROL_7 ((TX_PAGE_CBUS << 16) | 0x0036)
+#define REG_CBUS_LINK_STATUS_1 ((TX_PAGE_CBUS << 16) | 0x0037)
+#define REG_CBUS_LINK_STATUS_2 ((TX_PAGE_CBUS << 16) | 0x0038)
+#define REG_CBUS_LINK_CONTROL_8 ((TX_PAGE_CBUS << 16) | 0x0039)
+#define REG_CBUS_LINK_CONTROL_9 ((TX_PAGE_CBUS << 16) | 0x003A)
+#define REG_CBUS_LINK_CONTROL_10 ((TX_PAGE_CBUS << 16) | 0x003B)
+#define REG_CBUS_LINK_CONTROL_11 ((TX_PAGE_CBUS << 16) | 0x003C)
+#define REG_CBUS_LINK_CONTROL_12 ((TX_PAGE_CBUS << 16) | 0x003D)
+
+
+#define REG_CBUS_LINK_CTRL9_0 ((TX_PAGE_CBUS << 16) | 0x003A)
+#define REG_CBUS_LINK_CTRL9_1 ((TX_PAGE_CBUS << 16) | 0x00BA)
+
+#define REG_CBUS_DRV_STRENGTH_0 ((TX_PAGE_CBUS << 16) | 0x0040)
+#define REG_CBUS_DRV_STRENGTH_1 ((TX_PAGE_CBUS << 16) | 0x0041)
+#define REG_CBUS_ACK_CONTROL ((TX_PAGE_CBUS << 16) | 0x0042)
+#define REG_CBUS_CAL_CONTROL ((TX_PAGE_CBUS << 16) | 0x0043)
+
+#define REG_CBUS_SCRATCHPAD_0 ((TX_PAGE_CBUS << 16) | 0x00C0)
+#define REG_CBUS_DEVICE_CAP_0 ((TX_PAGE_CBUS << 16) | 0x0080)
+#define REG_CBUS_DEVICE_CAP_1 ((TX_PAGE_CBUS << 16) | 0x0081)
+#define REG_CBUS_DEVICE_CAP_2 ((TX_PAGE_CBUS << 16) | 0x0082)
+#define REG_CBUS_DEVICE_CAP_3 ((TX_PAGE_CBUS << 16) | 0x0083)
+#define REG_CBUS_DEVICE_CAP_4 ((TX_PAGE_CBUS << 16) | 0x0084)
+#define REG_CBUS_DEVICE_CAP_5 ((TX_PAGE_CBUS << 16) | 0x0085)
+#define REG_CBUS_DEVICE_CAP_6 ((TX_PAGE_CBUS << 16) | 0x0086)
+#define REG_CBUS_DEVICE_CAP_7 ((TX_PAGE_CBUS << 16) | 0x0087)
+#define REG_CBUS_DEVICE_CAP_8 ((TX_PAGE_CBUS << 16) | 0x0088)
+#define REG_CBUS_DEVICE_CAP_9 ((TX_PAGE_CBUS << 16) | 0x0089)
+#define REG_CBUS_DEVICE_CAP_A ((TX_PAGE_CBUS << 16) | 0x008A)
+#define REG_CBUS_DEVICE_CAP_B ((TX_PAGE_CBUS << 16) | 0x008B)
+#define REG_CBUS_DEVICE_CAP_C ((TX_PAGE_CBUS << 16) | 0x008C)
+#define REG_CBUS_DEVICE_CAP_D ((TX_PAGE_CBUS << 16) | 0x008D)
+#define REG_CBUS_DEVICE_CAP_E ((TX_PAGE_CBUS << 16) | 0x008E)
+#define REG_CBUS_DEVICE_CAP_F ((TX_PAGE_CBUS << 16) | 0x008F)
+#define REG_CBUS_SET_INT_0 ((TX_PAGE_CBUS << 16) | 0x00A0)
+#define REG_CBUS_SET_INT_1 ((TX_PAGE_CBUS << 16) | 0x00A1)
+#define REG_CBUS_SET_INT_2 ((TX_PAGE_CBUS << 16) | 0x00A2)
+#define REG_CBUS_SET_INT_3 ((TX_PAGE_CBUS << 16) | 0x00A3)
+#define REG_CBUS_WRITE_STAT_0 ((TX_PAGE_CBUS << 16) | 0x00B0)
+#define REG_CBUS_WRITE_STAT_1 ((TX_PAGE_CBUS << 16) | 0x00B1)
+#define REG_CBUS_WRITE_STAT_2 ((TX_PAGE_CBUS << 16) | 0x00B2)
+#define REG_CBUS_WRITE_STAT_3 ((TX_PAGE_CBUS << 16) | 0x00B3)
+
+#define GET_PAGE(x) (x >> 16)
+#define GET_OFF(x) (x & 0xffff)
+
+
+#define MHL_SII_REG_NAME_RD(arg)\
+ mhl_i2c_reg_read(client, GET_PAGE(arg), GET_OFF(arg))
+#define MHL_SII_REG_NAME_WR(arg, val)\
+ mhl_i2c_reg_write(client, GET_PAGE(arg), GET_OFF(arg), val)
+#define MHL_SII_REG_NAME_MOD(arg, mask, val)\
+ mhl_i2c_reg_modify(client, GET_PAGE(arg), GET_OFF(arg), mask, val)
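+
+/*
+ * Each REG_* name above packs the page index into bits 31:16 and
+ * the register offset into bits 15:0, so, for example,
+ *
+ * MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+ *
+ * expands to mhl_i2c_reg_write(client, TX_PAGE_3, 0x0000, 0x84).
+ */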
+
#endif /* __MHL_MSM_H__ */
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
new file mode 100644
index 0000000..613cd9f
--- /dev/null
+++ b/include/linux/msm_ipa.h
@@ -0,0 +1,714 @@
+#ifndef _MSM_IPA_H_
+#define _MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_NAT_DMA 24
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_GET_ASYNC_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_MAX 29
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 20
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS (1ul << 0)
+#define IPA_FLT_PROTOCOL (1ul << 1)
+#define IPA_FLT_SRC_ADDR (1ul << 2)
+#define IPA_FLT_DST_ADDR (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE (1ul << 6)
+#define IPA_FLT_CODE (1ul << 7)
+#define IPA_FLT_SPI (1ul << 8)
+#define IPA_FLT_SRC_PORT (1ul << 9)
+#define IPA_FLT_DST_PORT (1ul << 10)
+#define IPA_FLT_TC (1ul << 11)
+#define IPA_FLT_FLOW_LABEL (1ul << 12)
+#define IPA_FLT_NEXT_HDR (1ul << 13)
+#define IPA_FLT_META_DATA (1ul << 14)
+#define IPA_FLT_FRAGMENT (1ul << 15)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, for e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+ IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC2_PROD,
+ IPA_CLIENT_HSIC3_PROD,
+ IPA_CLIENT_HSIC4_PROD,
+ IPA_CLIENT_HSIC5_PROD,
+ IPA_CLIENT_USB_PROD,
+ IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+ IPA_CLIENT_A2_EMBEDDED_PROD,
+ IPA_CLIENT_A2_TETHERED_PROD,
+ IPA_CLIENT_A5_LAN_WAN_PROD,
+ IPA_CLIENT_A5_CMD_PROD,
+ IPA_CLIENT_Q6_LAN_PROD,
+
+ IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC2_CONS,
+ IPA_CLIENT_HSIC3_CONS,
+ IPA_CLIENT_HSIC4_CONS,
+ IPA_CLIENT_HSIC5_CONS,
+ IPA_CLIENT_USB_CONS,
+ IPA_CLIENT_A2_EMBEDDED_CONS,
+ IPA_CLIENT_A2_TETHERED_CONS,
+ IPA_CLIENT_A5_LAN_WAN_CONS,
+ IPA_CLIENT_Q6_LAN_CONS,
+
+ IPA_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+ IPA_IP_v4,
+ IPA_IP_v6,
+ IPA_IP_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., A5): 5'd3
+ */
+enum ipa_flt_action {
+ IPA_PASS_TO_ROUTING,
+ IPA_PASS_TO_SRC_NAT,
+ IPA_PASS_TO_DST_NAT,
+ IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all fields in little-endian (LE) byte order
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+ uint32_t attrib_mask;
+ uint16_t src_port_lo;
+ uint16_t src_port_hi;
+ uint16_t dst_port_lo;
+ uint16_t dst_port_hi;
+ uint8_t type;
+ uint8_t code;
+ uint32_t spi;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint32_t meta_data;
+ uint32_t meta_data_mask;
+ union {
+ struct {
+ uint8_t tos;
+ uint8_t protocol;
+ uint32_t src_addr;
+ uint32_t src_addr_mask;
+ uint32_t dst_addr;
+ uint32_t dst_addr_mask;
+ } v4;
+ struct {
+ uint8_t tc;
+ uint32_t flow_label;
+ uint8_t next_hdr;
+ uint32_t src_addr[4];
+ uint32_t src_addr_mask[4];
+ uint32_t dst_addr[4];
+ uint32_t dst_addr_mask[4];
+ } v6;
+ } u;
+};
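As a concrete illustration of the attribute mask, the sketch below (hypothetical user-space C, assuming the definitions above are visible via <linux/msm_ipa.h>) builds an IPv4 attrib matching TCP traffic to 10.0.0.0/8 on destination port 80; only fields whose bit is set in attrib_mask are consulted:

#include <string.h>
#include <linux/msm_ipa.h>

static void fill_example_attrib(struct ipa_rule_attrib *attrib)
{
	memset(attrib, 0, sizeof(*attrib));
	attrib->attrib_mask = IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR |
			      IPA_FLT_DST_PORT;
	attrib->u.v4.protocol = 6;		/* IPPROTO_TCP */
	attrib->u.v4.dst_addr = 0x0a000000;	/* 10.0.0.0; byte order per driver convention */
	attrib->u.v4.dst_addr_mask = 0xff000000;
	attrib->dst_port = 80;
}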
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ */
+struct ipa_flt_rule {
+ enum ipa_flt_action action;
+ uint32_t rt_tbl_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header,
+ * it is not an index or an offset
+ * @attrib: attributes of the rule
+ */
+struct ipa_rt_rule {
+ enum ipa_client_type dst;
+ uint32_t hdr_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status: out parameter, status of header add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_add {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+ uint32_t hdr_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @hdr: all headers need to go here back to
+ * back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+ uint8_t commit;
+ uint8_t num_hdrs;
+ struct ipa_hdr_add hdr[0];
+};
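Because hdr[0] is a flexible array, the whole request must be carved out of one contiguous allocation sized for the header count. A hedged user-space sketch (the fd on "/dev/ipa" and the "eth_hdr" name are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_ipa.h>

static int add_one_hdr(int fd)
{
	size_t sz = sizeof(struct ipa_ioc_add_hdr) + sizeof(struct ipa_hdr_add);
	struct ipa_ioc_add_hdr *req = calloc(1, sz);
	int ret;

	if (!req)
		return -1;
	req->commit = 1;			/* write through to IPA HW */
	req->num_hdrs = 1;
	strncpy(req->hdr[0].name, "eth_hdr", IPA_RESOURCE_NAME_MAX - 1);
	req->hdr[0].hdr_len = 14;		/* Ethernet II header */
	req->hdr[0].is_partial = 1;		/* addresses filled in later */

	ret = ioctl(fd, IPA_IOC_ADD_HDR, req);
	if (!ret && !req->hdr[0].status)
		printf("header handle 0x%x\n", req->hdr[0].hdr_hdl);
	free(req);
	return ret;
}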
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr: out parameter, contents of specified header,
+ * valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ * valid only when ioctl return val is non-negative
+ * @is_partial: out parameter, indicates whether specified header is partial
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_copy_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters; if the lookup
+ * succeeds, the caller must call put to release the reference when done
+ * @name: name of the header resource
+ * @hdl: out parameter, handle of header entry
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status: out parameter, status of header remove operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+ uint8_t commit;
+ uint8_t num_hdls;
+ struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table; it is NOT possible to add rules at
+ * the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_add {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add rules[0];
+};
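Routing rule addition follows the same inline-array pattern. A sketch reusing the includes from the previous sketch (the table name, client, and port are illustrative; hdr_hdl would come from a prior IPA_IOC_ADD_HDR):

static int add_one_rt_rule(int fd, uint32_t hdr_hdl)
{
	size_t sz = sizeof(struct ipa_ioc_add_rt_rule) +
		    sizeof(struct ipa_rt_rule_add);
	struct ipa_ioc_add_rt_rule *req = calloc(1, sz);
	int ret;

	if (!req)
		return -1;
	req->commit = 1;
	req->ip = IPA_IP_v4;
	strncpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX - 1);
	req->num_rules = 1;
	req->rules[0].at_rear = 1;
	req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
	req->rules[0].rule.hdr_hdl = hdr_hdl;	/* handle from IPA_IOC_ADD_HDR */
	req->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
	req->rules[0].rule.attrib.dst_port = 80;

	ret = ioctl(fd, IPA_IOC_ADD_RT_RULE, req);
	free(req);
	return ret;
}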
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status: output parameter, status of route rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of filtering rule add operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+ struct ipa_flt_rule rule;
+ uint8_t at_rear;
+ uint32_t flt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep: which "clients" pipe does this rule apply to?
+ * valid only when global is 0
+ * @global: does this rule apply to the global filter table of the given IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ enum ipa_client_type ep;
+ uint8_t global;
+ uint8_t num_rules;
+ struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status: output parameter, status of filtering rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_flt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters; if the lookup
+ * succeeds, the caller must call put to release the
+ * reference when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl: output parameter, handle of routing table, valid only when ioctl
+ * return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+ enum ipa_ip_type ip;
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
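The get/put pairing above is a reference count, so every successful GET must eventually be matched by a PUT. A sketch using the default v4 table (same includes as the earlier sketches):

static void lookup_and_release(int fd)
{
	struct ipa_ioc_get_rt_tbl lookup;

	memset(&lookup, 0, sizeof(lookup));
	lookup.ip = IPA_IP_v4;
	strncpy(lookup.name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX - 1);

	if (ioctl(fd, IPA_IOC_GET_RT_TBL, &lookup) < 0)
		return;
	/* lookup.hdl may now be used, e.g. in ipa_flt_rule.rt_tbl_hdl */
	ioctl(fd, IPA_IOC_PUT_RT_TBL, lookup.hdl);	/* drop the reference */
}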
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props: output parameter, number of tx properties
+ * valid only when ioctl return val is non-negative
+ * @num_rx_props: output parameter, number of rx properties
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_query_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_tx_props;
+ uint32_t num_rx_props;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type dst_pipe;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @tx: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ */
+struct ipa_ioc_rx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type src_pipe;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of the table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ size_t size;
+ off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+ uint8_t tbl_index;
+ uint32_t ipv4_rules_offset;
+ uint32_t expn_rules_offset;
+
+ uint32_t index_offset;
+ uint32_t index_expn_offset;
+
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+ uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+ uint8_t table_index;
+ uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr: type of table, from which the base address of the table
+ * can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+ uint8_t table_index;
+ uint8_t base_addr;
+
+ uint32_t offset;
+ uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+ uint8_t entries;
+ struct ipa_ioc_nat_dma_one dma[0];
+
+};
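As with the other inline-array ioctls, the command block and its dma[] entries are one allocation. A sketch issuing a single NAT DMA write (all values illustrative; same includes as the earlier sketches):

static int nat_dma_write_one(int fd, uint32_t offset, uint16_t data)
{
	size_t sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
		    sizeof(struct ipa_ioc_nat_dma_one);
	struct ipa_ioc_nat_dma_cmd *cmd = calloc(1, sz);
	int ret;

	if (!cmd)
		return -1;
	cmd->entries = 1;
	cmd->dma[0].table_index = 0;
	cmd->dma[0].base_addr = 0;	/* table type; base address is inferred */
	cmd->dma[0].offset = offset;
	cmd->dma[0].data = data;

	ret = ioctl(fd, IPA_IOC_NAT_DMA, cmd);
	free(cmd);
	return ret;
}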
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @msg_len: the length of the message in bytes
+ * @rsvd: reserved bits for future use.
+ *
+ * A user-space client should issue a read on the device (/dev/ipa) with a
+ * buffer of at least this size in a continuous loop; the call blocks when
+ * there is no pending async message.
+ *
+ * After reading a message's meta-data using the above scheme, the client
+ * should issue a GET_MSG IOCTL to read the message itself into a buffer of
+ * "msg_len" bytes immediately following the ipa_msg_meta in the IOCTL payload
+ */
+struct ipa_msg_meta {
+ uint8_t msg_type;
+ uint16_t msg_len;
+ uint8_t rsvd;
+};
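A sketch of the read loop described in the comment above (handle_msg is a hypothetical consumer, and the 512-byte payload headroom is an arbitrary choice; assumes <unistd.h> and <linux/msm_ipa.h>):

static void poll_async_msgs(int fd, void (*handle_msg)(uint8_t, void *, int))
{
	char buf[sizeof(struct ipa_msg_meta) + 512];
	struct ipa_msg_meta *meta = (struct ipa_msg_meta *)buf;

	for (;;) {
		/* blocks until an async message is pending */
		if (read(fd, buf, sizeof(buf)) < (ssize_t)sizeof(*meta))
			break;
		/* msg_len bytes of payload follow the meta-data */
		handle_msg(meta->msg_type, buf + sizeof(*meta), meta->msg_len);
	}
}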
+
+/**
+ * actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_RT_TBL, \
+ uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_HDR, \
+ uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_SET_FLT, \
+ uint32_t)
+#define IPA_IOC_GET_ASYNC_MSG _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_ASYNC_MSG, \
+ struct ipa_msg_meta *)
+
+#endif /* _MSM_IPA_H_ */
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 07179e9..4376ece 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -6123,6 +6123,11 @@
/* Band cut equalizer effect.*/
#define ASM_PARAM_EQ_BAND_CUT 6
+/* Voice get & set params */
+#define VOICE_CMD_SET_PARAM 0x0001133D
+#define VOICE_CMD_GET_PARAM 0x0001133E
+#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
/* ERROR CODES */
/* Success. The operation completed with no errors. */
diff --git a/lib/Kconfig b/lib/Kconfig
index f1621d5..8437e36 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -387,4 +387,11 @@
The kernel drivers receive the QMI message over a transport
and then decode it into a C structure.
+config QMI_ENCDEC_DEBUG
+ bool
+ help
+ Kernel config option to enable debugging of the QMI Encode/Decode
+ library. This logs information about each element and message
+ being encoded and decoded.
+
endmenu
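Note that the QMI_ENCDEC_DEBUG logging added below is emitted through pr_debug(), so on kernels built with CONFIG_DYNAMIC_DEBUG the messages stay silent until enabled at runtime, e.g. with `echo 'file qmi_encdec.c +p' > /sys/kernel/debug/dynamic_debug/control` (assuming debugfs is mounted at /sys/kernel/debug).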
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
index d759885..40273d0 100644
--- a/lib/qmi_encdec.c
+++ b/lib/qmi_encdec.c
@@ -25,6 +25,61 @@
#define TLV_LEN_SIZE sizeof(uint16_t)
#define TLV_TYPE_SIZE sizeof(uint8_t)
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+ const u8 *ptr = buf; \
+ int i, linelen, remaining = buf_len; \
+ int rowsize = 16, groupsize = 1; \
+ unsigned char linebuf[256]; \
+ for (i = 0; i < buf_len; i += rowsize) { \
+ linelen = min(remaining, rowsize); \
+ remaining -= linelen; \
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+ linebuf, sizeof(linebuf), false); \
+ pr_debug("%s: %s\n", prefix_str, linebuf); \
+ } \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+
+#endif
+
static int _qmi_kernel_encode(struct elem_info *ei_array,
void *out_buf, void *in_c_struct,
int enc_level);
@@ -232,6 +287,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_encode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+ temp_ei->elem_size, buf_src);
UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
encoded_bytes, tlv_len, encode_tlv, rc);
break;
@@ -253,6 +310,7 @@
if (encode_tlv && enc_level == 1) {
QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
tlv_pointer = buf_dst;
tlv_len = 0;
@@ -260,6 +318,7 @@
encode_tlv = 0;
}
}
+ QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
return encoded_bytes;
}
@@ -419,11 +478,13 @@
void *buf_src = in_buf;
int rc;
+ QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
while (decoded_bytes < in_buf_len) {
if (dec_level == 1) {
tlv_pointer = buf_src;
QMI_ENCDEC_DECODE_TLV(&tlv_type,
&tlv_len, tlv_pointer);
+ QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
temp_ei = find_ei(ei_array, tlv_type);
@@ -470,6 +531,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_decode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+ temp_ei->elem_size, buf_dst);
UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
break;
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 59e220d..76cd625 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -1702,6 +1702,19 @@
return 0;
}
+
+static int mdm9615_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ pr_debug("%s()\n", __func__);
+ rate->min = rate->max = 48000;
+
+ return 0;
+}
+
static int mdm9615_aux_pcm_get_gpios(void)
{
int ret = 0;
@@ -2134,6 +2147,43 @@
.be_hw_params_fixup = mdm9615_auxpcm_be_params_fixup,
.ops = &mdm9615_sec_auxpcm_be_ops,
},
+ /* Incall Music BACK END DAI Link */
+ {
+ .name = LPASS_BE_VOICE_PLAYBACK_TX,
+ .stream_name = "Voice Farend Playback",
+ .cpu_dai_name = "msm-dai-q6.32773",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Uplink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_TX,
+ .stream_name = "Voice Uplink Capture",
+ .cpu_dai_name = "msm-dai-q6.32772",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Downlink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_RX,
+ .stream_name = "Voice Downlink Capture",
+ .cpu_dai_name = "msm-dai-q6.32771",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ .ignore_pmdown_time = 1, /* this dai link has playback support */
+ },
};
static struct snd_soc_dai_link mdm9615_dai_i2s_tabla[] = {
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 6acc136..cc69123 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -120,10 +120,10 @@
}
switch (payload[0]) {
case ADM_CMD_SET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
+ __func__);
if (rtac_make_adm_callback(
payload, data->payload_size)) {
- pr_debug("%s: payload[0]: 0x%x\n",
- __func__, payload[0]);
break;
}
case ADM_CMD_DEVICE_CLOSE_V5:
@@ -148,6 +148,20 @@
wake_up(&this_adm.wait[index]);
}
break;
+ case ADM_CMD_GET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* ADM_CMDRSP_GET_PP_PARAMS_V5 */
+ if (payload[1] != 0) {
+ pr_err("%s: ADM get param error = %d, resuming\n",
+ __func__, payload[1]);
+ rtac_make_adm_callback(payload,
+ data->payload_size);
+ }
+ break;
default:
pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
payload[0]);
@@ -174,8 +188,11 @@
wake_up(&this_adm.wait[index]);
}
break;
- case ADM_CMD_GET_PP_PARAMS_V5:
- pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n", __func__);
+ case ADM_CMDRSP_GET_PP_PARAMS_V5:
+ pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
+ if (payload[0] != 0)
+ pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
+ __func__, payload[0]);
rtac_make_adm_callback(payload,
data->payload_size);
break;
@@ -669,6 +686,11 @@
for (i = 0; i < num_copps; i++)
send_adm_cal(port_id[i], path);
+ for (i = 0; i < num_copps; i++)
+ rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id
+ [afe_get_port_index(port_id[i])]),
+ path, session_id);
+
fail_cmd:
kfree(matrix_map);
return ret;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 2d52c43..fd340cf 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -932,6 +932,10 @@
__func__, payload[0], payload[1]);
if (data->opcode == APR_BASIC_RSP_RESULT) {
token = data->token;
+ if (payload[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, payload[0], payload[1]);
+ }
switch (payload[0]) {
case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
if (rtac_make_asm_callback(ac->session, payload,
@@ -965,6 +969,20 @@
ac->cb(data->opcode, data->token,
(uint32_t *)data->payload, ac->priv);
break;
+ case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
+ pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 */
+ if (payload[1] != 0) {
+ pr_err("%s: ASM get param error = %d, resuming\n",
+ __func__, payload[1]);
+ rtac_make_asm_callback(ac->session, payload,
+ data->payload_size);
+ }
+ break;
default:
pr_debug("%s:command[0x%x] not expecting rsp\n",
__func__, payload[0]);
@@ -1008,6 +1026,10 @@
break;
}
case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+ pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2\n", __func__);
+ if (payload[0] != 0)
+ pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n",
+ __func__, payload[0]);
rtac_make_asm_callback(ac->session, payload,
data->payload_size);
break;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 338cfe3..b799e59 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
#include <mach/socinfo.h>
+#include <mach/qdsp6v2/apr_tal.h>
#include "sound/apr_audio-v2.h"
#include "sound/q6afe-v2.h"
@@ -208,6 +209,8 @@
static int voice_apr_register(void)
{
+ void *modem_mvm, *modem_cvs, *modem_cvp;
+
pr_debug("%s\n", __func__);
mutex_lock(&common.common_lock);
@@ -224,6 +227,18 @@
pr_err("%s: Unable to register MVM\n", __func__);
goto err;
}
+
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_mvm = apr_register("MODEM", "MVM",
+ qdsp_mvm_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_mvm == NULL)
+ pr_err("%s: Unable to register MVM for MODEM\n",
+ __func__);
}
if (common.apr_q6_cvs == NULL) {
@@ -237,6 +252,18 @@
pr_err("%s: Unable to register CVS\n", __func__);
goto err;
}
+ rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvs = apr_register("MODEM", "CVS",
+ qdsp_cvs_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvs == NULL)
+ pr_err("%s: Unable to register CVS for MODEM\n",
+ __func__);
}
@@ -251,6 +278,18 @@
pr_err("%s: Unable to register CVP\n", __func__);
goto err;
}
+ rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvp = apr_register("MODEM", "CVP",
+ qdsp_cvp_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvp == NULL)
+ pr_err("%s: Unable to register CVP for MODEM\n",
+ __func__);
}
@@ -262,6 +301,7 @@
if (common.apr_q6_cvs != NULL) {
apr_deregister(common.apr_q6_cvs);
common.apr_q6_cvs = NULL;
+ rtac_set_voice_handle(RTAC_CVS, NULL);
}
if (common.apr_q6_mvm != NULL) {
apr_deregister(common.apr_q6_mvm);
@@ -605,8 +645,9 @@
cvs_handle = voice_get_cvs_handle(v);
/* MVM, CVS sessions are destroyed only for Full control sessions. */
- if (is_voip_session(v->session_id)) {
- pr_debug("%s: MVM detach stream\n", __func__);
+ if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__,
+ v->voc_state);
/* Detach voice stream. */
detach_stream.hdr.hdr_field =
@@ -2176,6 +2217,10 @@
if (v->rec_info.rec_enable)
voice_cvs_start_record(v, v->rec_info.rec_mode);
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
return 0;
@@ -2526,6 +2571,7 @@
goto fail;
}
+ rtac_remove_voice(voice_get_cvs_handle(v));
cvp_handle = 0;
voice_set_cvp_handle(v, cvp_handle);
return 0;
@@ -3281,6 +3327,7 @@
mutex_lock(&v->lock);
if (v->voc_state == VOC_RUN) {
+ rtac_remove_voice(voice_get_cvs_handle(v));
/* send cmd to dsp to disable vocproc */
ret = voice_send_disable_vocproc_cmd(v);
if (ret < 0) {
@@ -3324,32 +3371,36 @@
voice_send_cvp_register_cal_cmd(v);
voice_send_cvp_register_vol_cal_cmd(v);
- ret = voice_send_enable_vocproc_cmd(v);
- if (ret < 0) {
- pr_err("%s: enable vocproc failed %d\n", __func__, ret);
- goto fail;
- }
+ ret = voice_send_enable_vocproc_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: enable vocproc failed %d\n", __func__, ret);
+ goto fail;
+ }
- /* Send tty mode if tty device is used */
- voice_send_tty_mode_cmd(v);
+ /* Send tty mode if tty device is used */
+ voice_send_tty_mode_cmd(v);
- /* enable widevoice if wv_enable is set */
- if (v->wv_enable)
- voice_send_set_widevoice_enable_cmd(v);
+ /* enable widevoice if wv_enable is set */
+ if (v->wv_enable)
+ voice_send_set_widevoice_enable_cmd(v);
- /* enable slowtalk */
- if (v->st_enable)
- voice_send_set_pp_enable_cmd(v,
+ /* enable slowtalk */
+ if (v->st_enable)
+ voice_send_set_pp_enable_cmd(v,
MODULE_ID_VOICE_MODULE_ST,
v->st_enable);
- /* enable FENS */
- if (v->fens_enable)
- voice_send_set_pp_enable_cmd(v,
+ /* enable FENS */
+ if (v->fens_enable)
+ voice_send_set_pp_enable_cmd(v,
MODULE_ID_VOICE_MODULE_FENS,
v->fens_enable);
- v->voc_state = VOC_RUN;
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
+ v->voc_state = VOC_RUN;
}
fail:
@@ -3702,7 +3753,9 @@
mutex_lock(&v->lock);
- if (v->voc_state == VOC_RUN) {
+ if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
+
ret = voice_destroy_vocproc(v);
if (ret < 0)
pr_err("%s: destroy voice failed\n", __func__);
@@ -3727,6 +3780,13 @@
mutex_lock(&v->lock);
+ if (v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC in ERR state\n", __func__);
+
+ voice_destroy_mvm_cvs_session(v);
+ v->voc_state = VOC_INIT;
+ }
+
if ((v->voc_state == VOC_INIT) ||
(v->voc_state == VOC_RELEASE)) {
ret = voice_apr_register();
@@ -3817,6 +3877,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3825,6 +3886,36 @@
c = priv;
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received MODEM reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+ apr_reset(c->apr_q6_mvm);
+ c->apr_q6_mvm = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].mvm_handle = 0;
+ }
+ return 0;
+ }
+
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
v = voice_get_session(data->dest_port);
@@ -3834,23 +3925,6 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_mvm);
- c->apr_q6_mvm = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].mvm_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
@@ -3935,6 +4009,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3944,6 +4019,35 @@
c = priv;
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvs);
+ c->apr_q6_cvs = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvs_handle = 0;
+ }
+ return 0;
+ }
v = voice_get_session(data->dest_port);
if (v == NULL) {
@@ -3952,28 +4056,15 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvs);
- c->apr_q6_cvs = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvs_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
pr_info("%x %x\n", ptr[0], ptr[1]);
+ if (ptr[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, ptr[0], ptr[1]);
+ }
/*response from CVS */
switch (ptr[0]) {
case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
@@ -4010,6 +4101,24 @@
wake_up(&v->cvs_wait);
break;
case VOICE_CMD_SET_PARAM:
+ pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+ rtac_make_voice_callback(RTAC_CVS, ptr,
+ data->payload_size);
+ break;
+ case VOICE_CMD_GET_PARAM:
+ pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* VOICE_EVT_GET_PARAM_ACK */
+ if (ptr[1] != 0) {
+ pr_err("%s: CVP get param error = %d, resuming\n",
+ __func__, ptr[1]);
+ rtac_make_voice_callback(RTAC_CVP,
+ data->payload,
+ data->payload_size);
+ }
break;
default:
pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
@@ -4125,7 +4234,16 @@
pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
- } else
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ ptr = data->payload;
+ if (ptr[0] != 0) {
+ pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+ __func__, ptr[0]);
+ }
+ rtac_make_voice_callback(RTAC_CVS, data->payload,
+ data->payload_size);
+ } else
pr_err("Unknown opcode 0x%x\n", data->opcode);
fail:
@@ -4138,6 +4256,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -4146,6 +4265,33 @@
c = priv;
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvp);
+ c->apr_q6_cvp = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvp_handle = 0;
+ }
+ return 0;
+ }
+
v = voice_get_session(data->dest_port);
if (v == NULL) {
pr_err("%s: v is NULL\n", __func__);
@@ -4153,28 +4299,15 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvp);
- c->apr_q6_cvp = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvp_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
pr_info("%x %x\n", ptr[0], ptr[1]);
+ if (ptr[1] != 0) {
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, ptr[0], ptr[1]);
+ }
switch (ptr[0]) {
case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2:
/*response from CVP */
@@ -4206,6 +4339,24 @@
wake_up(&v->cvp_wait);
break;
case VOICE_CMD_SET_PARAM:
+ pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+ rtac_make_voice_callback(RTAC_CVP, ptr,
+ data->payload_size);
+ break;
+ case VOICE_CMD_GET_PARAM:
+ pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+ __func__);
+ /* Should only come here if there is an APR */
+ /* error or malformed APR packet. Otherwise */
+ /* response will be returned as */
+ /* VOICE_EVT_GET_PARAM_ACK */
+ if (ptr[1] != 0) {
+ pr_err("%s: CVP get param error = %d, resuming\n",
+ __func__, ptr[1]);
+ rtac_make_voice_callback(RTAC_CVP,
+ data->payload,
+ data->payload_size);
+ }
break;
default:
pr_debug("%s: not match cmd = 0x%x\n",
@@ -4213,6 +4364,15 @@
break;
}
}
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+ ptr = data->payload;
+ if (ptr[0] != 0) {
+ pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+ __func__, ptr[0]);
+ }
+ rtac_make_voice_callback(RTAC_CVP, data->payload,
+ data->payload_size);
}
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 9f82694..aef463f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@
VOC_RUN,
VOC_CHANGE,
VOC_RELEASE,
+ VOC_ERROR,
};
struct mem_buffer {
@@ -884,10 +885,6 @@
#define VSS_MEDIA_ID_4GV_WB_MODEM 0x00010FC4
/*CDMA EVRC-WB vocoder modem format */
-#define VOICE_CMD_SET_PARAM 0x00011006
-#define VOICE_CMD_GET_PARAM 0x00011007
-#define VOICE_EVT_GET_PARAM_ACK 0x00011008
-
#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2 0x000112BF
struct vss_ivocproc_cmd_create_full_control_session_v2_t {