Merge "msm: mdss: hdmi: Replace of_get_gpio usage with of_get_named_gpio"
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 556300d..802716c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -18,8 +18,7 @@
- qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
images and self-authentication is not desired;
<1> if the hardware requires self-authenticating images.
-- qcom,is_loadable: <0> if PIL should not load the modem image
- <1> if PIL is required to load the modem image
+- qcom,is-loadable: Present if PIL is required to load the modem image.
Example:
qcom,mss@fc880000 {
@@ -34,7 +33,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..86c60e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,81 @@
+Qualcomm Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+ registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+ "bam-base" - string to identify the IPA BAM base registers.
+- interrupts: Specifies the interrupts associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+ "bam-irq" - string to identify the IPA BAM interrupt.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+- label: two labels are supported, a2-to-ipa and ipa-to-a2, which
+  supply the static configuration for the A2-IPA connection.
+- qcom,src-bam-physical-address: The physical address of the source BAM
+- qcom,ipa-bam-mem-type: The memory type:
+  0 (Pipe memory), 1 (Private memory), 2 (System memory)
+- qcom,src-bam-pipe-index: Source pipe index
+- qcom,dst-bam-physical-address: The physical address of the
+  destination BAM
+- qcom,dst-bam-pipe-index: Destination pipe index
+- qcom,data-fifo-offset: Data FIFO base offset
+- qcom,data-fifo-size: Data FIFO size (bytes)
+- qcom,descriptor-fifo-offset: Descriptor FIFO base offset
+- qcom,descriptor-fifo-size: Descriptor FIFO size (bytes)
+
+Optional properties:
+- qcom,ipa-pipe-mem: Specifies the base physical address and the
+  size of the IPA pipe memory region.
+  Pipe memory is a feature which may be supported by the
+  target (HW platform). The driver supports using pipe
+  memory instead of system memory. If this property is
+  absent from the IPA DTS entry, the driver uses system
+  memory. An illustrative fragment follows the example below.
+
+Example:
+
+qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+ reg = <0xfd4c0000 0x26000>,
+ <0xfd4c4000 0x14818>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 252 0>,
+ <0 253 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+};
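
For boards that do provide pipe memory, the optional qcom,ipa-pipe-mem
property described above would sit alongside the other IPA properties. A
minimal sketch; the base address and size here are illustrative assumptions,
not values taken from any shipping target:

qcom,ipa@fd4c0000 {
	compatible = "qcom,ipa";
	/* registers, interrupts and pipe sub nodes as in the example above */
	qcom,ipa-pipe-mem = <0xfd4c8000 0x8000>; /* illustrative values */
};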
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index 5e7c42a..43033a8 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : Should be "qcom,qseecom"
+- reg : Should contain the memory region address reserved for loading secure apps.
- qcom, msm_bus,name: Should be "qseecom-noc"
- qcom, msm_bus,num_cases: Depends on the use cases for bus scaling
- qcom, msm_bus,num_paths: The paths for source and destination ports
@@ -10,6 +11,8 @@
Example:
qcom,qseecom@fe806000 {
compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
qcom,msm_bus,name = "qseecom-noc";
qcom,msm_bus,num_cases = <4>;
qcom,msm_bus,active_only = <0>;
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 1e647a7..213da90 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -474,3 +474,22 @@
"MIC BIAS4 External", "Digital Mic6";
qcom,taiko-mclk-clk-freq = <12288000>;
};
+
+* msm-adsp-loader
+
+Required properties:
+ - compatible : "qcom,adsp-loader"
+ - qcom,adsp-state:
+   Some MSM targets use PIL to load the ADSP image, while others use SBL
+   to load the ADSP image at boot. Audio APR needs the state of the ADSP
+   in order to register and enable APR for sending commands to the ADSP,
+   so adsp-state reports the state of the ADSP to the ADSP loader. A
+   value of 0 indicates that the ADSP loader needs to use PIL, and a
+   value of 2 means the ADSP image is already loaded by SBL.
+
+Example:
+
+qcom,msm-adsp-loader {
+ compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <2>;
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index e784bfa..ae7d736 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,15 +34,20 @@
- reg : offset and length of the register set for the device.
- interrupts : should contain the uart interrupt.
-Optional properties:
-- cell-index: An integer specifying the line number of the UART device that
- represents this HSL hardware instance.
+Aliases:
+An alias may optionally be used to bind the serial device to a tty device
+(ttyHSLx) with a given line number. Aliases are of the form serial<n> where <n>
+is an integer representing the line number to use. On systems where multiple
+serial devices are present, it is recommended that an alias be defined for
+each such device.
Example:
+ aliases {
+ serial0 = &uart0; // This device will be called ttyHSL0
+ };
- serial@19c400000 {
+ uart0: serial@19c400000 {
compatible = "qcom,msm-lsuart-v14"
reg = <0x19c40000 0x1000">;
interrupts = <195>;
- cell-index = <0>; // this device will be named ttyHSL0
};
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
new file mode 100644
index 0000000..0e59f69
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -0,0 +1,20 @@
+MSM HSIC EHCI controller
+
+Required properties :
+- compatible : should be "qcom,hsic-host"
+- reg : offset and length of the register set in the memory map
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+ HSIC EHCI expects "core_irq" and optionally "async_irq".
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is "HSIC_VDDCX" and optionally - "HSIC_GDSC".
+
+Example MSM HSIC EHCI controller device node :
+ hsic@f9a15000 {
+ compatible = "qcom,hsic-host";
+ reg = <0xf9a15000 0x400>;
+ interrupts = <0 136 0>;
+ interrupt-names = "core_irq";
+ HSIC_VDDCX-supply = <&pm8019_l12>;
+ HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 57d776f..d686523 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -10,12 +10,20 @@
"irq" : Interrupt for DWC3 core
"otg_irq" : Interrupt for DWC3 core's OTG Events
- <supply-name>-supply: phandle to the regulator device tree node
- Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
- "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3" and "vbus_dwc3".
+ Required "supply-name" examples are:
+ "SSUSB_lp8" : 1.8v supply for SSPHY
+ "HSUSB_1p8" : 1.8v supply for HSPHY
+ "HSUSB_3p3" : 3.3v supply for HSPHY
+ "vbus_dwc3" : vbus supply for host mode
+ "ssusb_vdd_dig" : vdd supply for SSPHY digital circuit operation
+ "hsusb_vdd_dig" : vdd supply for HSPHY digital circuit operation
- qcom,dwc-usb3-msm-dbm-eps: Number of endpoints avaliable for
the DBM (Device Bus Manager). The DBM is HW unit which is part of
the MSM USB3.0 core (which also includes the Synopsys DesignWare
USB3.0 controller)
+- qcom,vdd-voltage-level: This property must be a list of three integer
+ values (none, min, max) where each value represents either a voltage in
+ microvolts or a value corresponding to a voltage corner (see note below).
Optional properties :
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
@@ -40,13 +48,14 @@
<0xFD4AB000 0x4>;
interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
interrupt-names = "irq", "otg_irq", "hs_phy_irq";
- SSUSB_VDDCX-supply = <&pm8841_s2>;
+ ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
SSUSB_1p8-supply = <&pm8941_l6>;
- HSUSB_VDDCX-supply = <&pm8841_s2>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
HSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_3p3-supply = <&pm8941_l24>;
vbus_dwc3-supply = <&pm8941_mvs1>;
qcom,dwc-usb3-msm-dbm-eps = <4>
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,msm_bus,name = "usb3";
qcom,msm_bus,num_cases = <2>;
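
Note on the example: the qcom,vdd-voltage-level = <1 5 7> values here are
corner codes rather than microvolt values. Assuming the RPM regulator corner
enumeration used elsewhere on this SoC family (1 = NONE, 5 = NORMAL,
7 = SUPER_TURBO; an assumption, not something the binding states), the
triplet reads as: no vote when USB is inactive, NORMAL as the minimum
operating corner, and SUPER_TURBO as the maximum.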
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 7961b78..502d34a 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -272,5 +272,33 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8644.dtsi"
/include/ "mpq8092-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
index f83fe76..f0570ba 100644
--- a/arch/arm/boot/dts/msm-gdsc.dtsi
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -18,41 +18,48 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_venus";
reg = <0xfd8c1024 0x4>;
+ status = "disabled";
};
gdsc_mdss: qcom,gdsc@fd8c2304 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_mdss";
reg = <0xfd8c2304 0x4>;
+ status = "disabled";
};
gdsc_jpeg: qcom,gdsc@fd8c35a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_jpeg";
reg = <0xfd8c35a4 0x4>;
+ status = "disabled";
};
gdsc_vfe: qcom,gdsc@fd8c36a4 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_vfe";
reg = <0xfd8c36a4 0x4>;
+ status = "disabled";
};
gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_gx";
reg = <0xfd8c4024 0x4>;
+ status = "disabled";
};
gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_oxili_cx";
reg = <0xfd8c4034 0x4>;
+ status = "disabled";
};
gdsc_usb_hsic: qcom,gdsc@fc400404 {
compatible = "qcom,gdsc";
regulator-name = "gdsc_usb_hsic";
reg = <0xfc400404 0x4>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 09b57a4..b900c3f 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -12,6 +12,7 @@
/include/ "skeleton.dtsi"
/include/ "msm8226-ion.dtsi"
+/include/ "msm-gdsc.dtsi"
/ {
model = "Qualcomm MSM 8226";
@@ -84,4 +85,28 @@
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm8226-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8910-rumi.dts b/arch/arm/boot/dts/msm8910-rumi.dts
new file mode 100644
index 0000000..0d944aa
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-rumi.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8910.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8910 Rumi";
+ compatible = "qcom,msm8910-rumi", "qcom,msm8910";
+ qcom,msm-id = <147 1 0>;
+
+ serial@f991f000 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index e1b2863..7557fd1 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -192,6 +193,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc2 {
#address-cells = <0>;
interrupt-parent = <&sdcc2>;
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 15fb799..cf45ceb 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -109,6 +110,22 @@
};
};
+ i2c@f9967000 {
+ sii8334@72 {
+ compatible = "qcom,mhl-sii8334";
+ reg = <0x72>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <82 0x8>;
+ mhl-intr-gpio = <&msmgpio 82 0>;
+ mhl-pwr-gpio = <&msmgpio 12 0>;
+ mhl-rst-gpio = <&pm8941_mpps 8 0>;
+ avcc_18-supply = <&pm8941_l24>;
+ avcc_12-supply = <&pm8941_l2>;
+ smps3a-supply = <&pm8941_s3>;
+ vdda-supply = <&pm8941_l12>;
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -192,6 +209,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc1 {
qcom,bus-width = <4>;
};
@@ -256,6 +333,13 @@
};
gpio@cb00 { /* GPIO 12 */
+ qcom,mode = <1>; /* QPNP_PIN_MODE_DIG_OUT */
+ qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,out-strength = <2>; /* QPNP_PIN_OUT_STRENGTH_MED */
+ qcom,src-select = <0>; /* QPNP_PIN_SEL_FUNC_CONSTANT */
+ qcom,master-en = <1>;
};
gpio@cc00 { /* GPIO 13 */
@@ -384,6 +468,12 @@
};
mpp@a700 { /* MPP 8 */
+ qcom,mode = <1>; /* DIG_OUT */
+ qcom,output-type = <0>; /* CMOS */
+ qcom,pull-up = <0>;
+ qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
+ qcom,src-select = <0>; /* CONSTANT */
+ qcom,master-en = <1>; /* ENABLE MPP */
};
};
diff --git a/arch/arm/boot/dts/msm8974-leds.dtsi b/arch/arm/boot/dts/msm8974-leds.dtsi
new file mode 100644
index 0000000..89bb687
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-leds.dtsi
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d000 {
+ status = "okay";
+ qcom,rgb_0 {
+ label = "rgb";
+ linux,name = "led:rgb_red";
+ qcom,mode = <0>;
+ qcom,pwm-channel = <6>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <3>;
+ linux,default-trigger =
+ "battery-charging";
+ };
+
+ qcom,rgb_1 {
+ label = "rgb";
+ linux,name = "led:rgb_green";
+ qcom,mode = <0>;
+ qcom,pwm-channel = <5>;
+ qcom,pwm-us = <1000>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <4>;
+ linux,default-trigger = "battery-full";
+ };
+ };
+
+ qcom,leds@d100 {
+ status = "disabled";
+ };
+
+ qcom,leds@d2000 {
+ status = "disabled";
+ };
+
+ qcom,leds@d300 {
+ status = "okay";
+ qcom,flash_0 {
+ qcom,max-current = <1000>;
+ qcom,default-state = "off";
+ qcom,headroom = <0>;
+ qcom,duration = <1280>;
+ qcom,clamp-curr = <200>;
+ qcom,startup-dly = <1>;
+ qcom,safety-timer;
+ label = "flash";
+ linux,default-trigger =
+ "flash0_trigger";
+ qcom,id = <1>;
+ linux,name = "led:flash_0";
+ qcom,current = <625>;
+ };
+
+ qcom,flash_1 {
+ qcom,max-current = <1000>;
+ qcom,default-state = "off";
+ qcom,headroom = <0>;
+ qcom,duration = <1280>;
+ qcom,clamp-curr = <200>;
+ qcom,startup-dly = <1>;
+ qcom,safety-timer;
+ linux,default-trigger =
+ "flash1_trigger";
+ label = "flash";
+ qcom,id = <2>;
+ linux,name = "led:flash_1";
+ qcom,current = <625>;
+ };
+ };
+
+ qcom,leds@d400 {
+ status = "disabled";
+ };
+
+ qcom,leds@d500 {
+ status = "disabled";
+ };
+
+ qcom,leds@d600 {
+ status = "disabled";
+ };
+
+ qcom,leds@d700 {
+ status = "disabled";
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 96889aa..d04462f 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -11,6 +11,7 @@
*/
/include/ "msm8974-camera-sensor-liquid.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -207,6 +208,12 @@
startup-delay-us = <12000>;
enable-active-high;
};
+
+ sound {
+ qcom,model = "msm8974-taiko-liquid-snd-card";
+ qcom,ext-spk-amp-supply = <&ext_5v>;
+ qcom,ext-spk-amp-gpio = <&pm8841_mpps 1 0>;
+ };
};
&usb3 {
@@ -453,6 +460,12 @@
&pm8841_mpps {
mpp@a000 { /* MPP 1 */
+ /* CLASS_D_EN speakers PA */
+ qcom,mode = <1>; /* DIG_OUT */
+ qcom,output-type = <0>; /* PNP_PIN_OUT_BUF_CMOS */
+ qcom,vin-sel = <2>; /* S3A 1.8v */
+ qcom,src-select = <0>; /* CONSTANT */
+ qcom,master-en = <1>; /* ENABLE MPP */
};
mpp@a100 { /* MPP 2 */
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 80d2440..8563996 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -12,6 +12,7 @@
/include/ "dsi-panel-toshiba-720p-video.dtsi"
/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
/ {
serial@f991e000 {
@@ -192,6 +193,66 @@
};
};
+&spmi_bus {
+ qcom,pm8941@1 {
+ qcom,leds@d800 {
+ status = "okay";
+ qcom,wled_0 {
+ label = "wled";
+ linux,name = "wled:backlight";
+ linux,default-trigger = "bkl-trigger";
+ qcom,cs-out-en;
+ qcom,op-fdbck;
+ qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ };
+ };
+
+ qcom,leds@d900 {
+ status = "disabled";
+ };
+
+ qcom,leds@da00 {
+ status = "disabled";
+ };
+
+ qcom,leds@db00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dc00 {
+ status = "disabled";
+ };
+
+ qcom,leds@dd00 {
+ status = "disabled";
+ };
+
+ qcom,leds@de00 {
+ status = "disabled";
+ };
+
+ qcom,leds@df00 {
+ status = "disabled";
+ };
+
+ qcom,leds@e000 {
+ status = "disabled";
+ };
+
+ qcom,leds@e100 {
+ status = "disabled";
+ };
+ };
+};
+
&sdcc2 {
#address-cells = <0>;
interrupt-parent = <&sdcc2>;
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 495d3fb..3f7e9de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -71,7 +71,6 @@
rpm-regulator-smpb2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8841_s2: regulator-s2 {
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1050000>;
@@ -131,7 +130,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_s2: regulator-s2 {
regulator-min-microvolt = <2150000>;
regulator-max-microvolt = <2150000>;
@@ -284,7 +282,6 @@
rpm-regulator-ldoa12 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8941_l12: regulator-l12 {
parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 93ba2bf..19b8828 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -156,6 +156,19 @@
qcom,bus-width = <8>;
qcom,nonremovable;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ qcom,msm-bus,name = "sdcc1";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
+ <78 512 6656 13312>, /* 13 MB/s*/
+ <78 512 13312 26624>, /* 26 MB/s */
+ <78 512 26624 53248>, /* 52 MB/s */
+ <78 512 53248 106496>, /* 104 MB/s */
+ <78 512 106496 212992>, /* 208 MB/s */
+ <78 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <42>;
};
@@ -190,6 +203,19 @@
qcom,xpc;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
qcom,current-limit = <800>;
+
+ qcom,msm-bus,name = "sdcc2";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 6656 13312>, /* 13 MB/s*/
+ <81 512 13312 26624>, /* 26 MB/s */
+ <81 512 26624 53248>, /* 52 MB/s */
+ <81 512 53248 106496>, /* 104 MB/s */
+ <81 512 106496 212992>, /* 208 MB/s */
+ <81 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
qcom,dat1-mpm-int = <44>;
};
@@ -222,6 +248,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc3";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
+ <79 512 6656 13312>, /* 13 MB/s*/
+ <79 512 13312 26624>, /* 26 MB/s */
+ <79 512 26624 53248>, /* 52 MB/s */
+ <79 512 53248 106496>, /* 104 MB/s */
+ <79 512 106496 212992>, /* 208 MB/s */
+ <79 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
@@ -254,6 +293,19 @@
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
+
+ qcom,msm-bus,name = "sdcc4";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
+ <80 512 6656 13312>, /* 13 MB/s*/
+ <80 512 13312 26624>, /* 26 MB/s */
+ <80 512 26624 53248>, /* 52 MB/s */
+ <80 512 53248 106496>, /* 104 MB/s */
+ <80 512 106496 212992>, /* 208 MB/s */
+ <80 512 2147483647 4294967295>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
status = "disable";
};
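
Note on how the two new bandwidth tables relate: each qcom,msm-bus,vectors-KBps
entry is a <master slave ab ib> tuple, with ab the average and ib the
instantaneous bandwidth vote in KBps, while qcom,bus-bw-vectors-bps lists the
throughput thresholds, in bytes per second, at which the driver steps to the
corresponding vote. Checking the 13 MB/s case (reading KBps as KiB/s):

	ib = 13312 KBps * 1024 = 13631488 bytes/s, which matches the second
	bus-bw-vectors-bps entry exactly (13 MiB/s);
	ab = 6656 KBps is half of ib, presumably a 50% average-duty assumption.

The same 2:1 ib:ab ratio holds for every intermediate case in all four
controllers' tables; the final <2147483647 4294967295> entry simply votes the
maximum representable bandwidth.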
@@ -540,116 +592,6 @@
<0x1e50008a>, /* LPG_CHAN10 */
<0x1e60008b>, /* LPG_CHAN11 */
<0x1e70008c>; /* LPG_CHAN12 */
-
- qcom,pm8941@1 {
- qcom,leds@d300 {
- status = "okay";
- qcom,flash_0 {
- qcom,max-current = <1000>;
- qcom,default-state = "off";
- qcom,headroom = <0>;
- qcom,duration = <1280>;
- qcom,clamp-curr = <200>;
- qcom,startup-dly = <1>;
- qcom,safety-timer;
- label = "flash";
- linux,default-trigger =
- "flash0_trigger";
- qcom,id = <1>;
- linux,name = "led:flash_0";
- qcom,current = <625>;
- };
-
- qcom,flash_1 {
- qcom,max-current = <1000>;
- qcom,default-state = "off";
- qcom,headroom = <0>;
- qcom,duration = <1280>;
- qcom,clamp-curr = <200>;
- qcom,startup-dly = <1>;
- qcom,safety-timer;
- linux,default-trigger =
- "flash1_trigger";
- label = "flash";
- qcom,id = <2>;
- linux,name = "led:flash_1";
- qcom,current = <625>;
- };
- };
-
- qcom,leds@d400 {
- status = "disabled";
- };
-
- qcom,leds@d500 {
- status = "disabled";
- };
-
- qcom,leds@d600 {
- status = "disabled";
- };
-
- qcom,leds@d700 {
- status = "disabled";
- };
-
- qcom,leds@d800 {
- status = "okay";
- qcom,wled_0 {
- label = "wled";
- linux,name = "wled:backlight";
- linux,default-trigger = "bkl-trigger";
- qcom,cs-out-en;
- qcom,op-fdbck;
- qcom,default-state = "off";
- qcom,max-current = <25>;
- qcom,ctrl-delay-us = <0>;
- qcom,boost-curr-lim = <3>;
- qcom,cp-sel = <0>;
- qcom,switch-freq = <2>;
- qcom,ovp-val = <2>;
- qcom,num-strings = <1>;
- qcom,id = <0>;
- };
- };
-
- qcom,leds@d900 {
- status = "disabled";
- };
-
- qcom,leds@da00 {
- status = "disabled";
- };
-
- qcom,leds@db00 {
- status = "disabled";
- };
-
- qcom,leds@dc00 {
- status = "disabled";
- };
-
- qcom,leds@dd00 {
- status = "disabled";
- };
-
- qcom,leds@de00 {
- status = "disabled";
- };
-
- qcom,leds@df00 {
- status = "disabled";
- };
-
- qcom,leds@e000 {
- status = "disabled";
- };
-
- qcom,leds@e100 {
- status = "disabled";
- };
-
- };
};
i2c@f9967000 { /* BLSP#11 */
@@ -724,13 +666,14 @@
<0xfd4ab000 0x4>;
interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
interrupt-names = "irq", "otg_irq", "hs_phy_irq";
- SSUSB_VDDCX-supply = <&pm8841_s2>;
+ ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
SSUSB_1p8-supply = <&pm8941_l6>;
- HSUSB_VDDCX-supply = <&pm8841_s2>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
HSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_3p3-supply = <&pm8941_l24>;
vbus_dwc3-supply = <&pm8941_mvs1>;
qcom,dwc-usb3-msm-dbm-eps = <4>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,msm-bus,name = "usb3";
qcom,msm-bus,num-cases = <2>;
@@ -757,6 +700,7 @@
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <0>;
};
qcom,msm-pcm {
@@ -933,7 +877,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
- qcom,is_loadable = <1>;
+ qcom,is-loadable;
qcom,firmware-name = "mba";
qcom,pil-self-auth = <1>;
};
@@ -1029,6 +973,8 @@
qcom,qseecom@fe806000 {
compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,active-only = <0>;
@@ -1212,7 +1158,7 @@
};
qcom,msm-mem-hole {
compatible = "qcom,msm-mem-hole";
- qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
+ qcom,memblock-remove = <0x7f00000 0x8000000>; /* Address and Size of Hole */
};
qcom,smem@fa00000 {
@@ -1284,6 +1230,34 @@
};
};
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_mdss {
+ status = "ok";
+};
+
+&gdsc_jpeg {
+ status = "ok";
+};
+
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_gx {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
+&gdsc_usb_hsic {
+ status = "ok";
+};
+
/include/ "msm-pm8x41-rpm-regulator.dtsi"
/include/ "msm-pm8841.dtsi"
/include/ "msm-pm8941.dtsi"
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index b128648..24f616d 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -23,7 +23,6 @@
rpm-regulator-smpa2 {
status = "okay";
- qcom,allow-atomic = <1>;
pm8019_s2: regulator-s2 {
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <1250000>;
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index b79f370..e1e72b5 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -88,6 +88,15 @@
reg = <0xfc42b0c8 0xc8>;
};
+ hsic@f9a15000 {
+ compatible = "qcom,hsic-host";
+ reg = <0xf9a15000 0x400>;
+ interrupts = <0 136 0>;
+ interrupt-names = "core_irq";
+ HSIC_VDDCX-supply = <&pm8019_l12>;
+ HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+ };
+
qcom,nand@f9ac0000 {
compatible = "qcom,msm-nand";
reg = <0xf9ac0000 0x1000>,
@@ -245,6 +254,42 @@
interrupts = <0 29 1>;
};
+ qcom,ipa@fd4c0000 {
+ compatible = "qcom,ipa";
+ reg = <0xfd4c0000 0x26000>,
+ <0xfd4c4000 0x14818>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 252 0>,
+ <0 253 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+
+ qcom,pipe1 {
+ label = "a2-to-ipa";
+ qcom,src-bam-physical-address = <0xfc834000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <1>;
+ qcom,dst-bam-physical-address = <0xfd4c0000>;
+ qcom,dst-bam-pipe-index = <6>;
+ qcom,data-fifo-offset = <0x1000>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0x1d00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+
+ qcom,pipe2 {
+ label = "ipa-to-a2";
+ qcom,src-bam-physical-address = <0xfd4c0000>;
+ qcom,ipa-bam-mem-type = <0>;
+ qcom,src-bam-pipe-index = <7>;
+ qcom,dst-bam-physical-address = <0xfc834000>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x00>;
+ qcom,data-fifo-size = <0xd00>;
+ qcom,descriptor-fifo-offset = <0xd00>;
+ qcom,descriptor-fifo-size = <0x300>;
+ };
+ };
+
qcom,acpuclk@f9010000 {
compatible = "qcom,acpuclk-9625";
reg = <0xf9010008 0x10>,
@@ -393,6 +438,7 @@
qcom,msm-adsp-loader {
compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <2>;
};
qcom,msm-pcm {
@@ -440,6 +486,11 @@
qcom,msm-dai-q6 {
compatible = "qcom,msm-dai-q6";
};
+
+ qcom,mss {
+ compatible = "qcom,pil-q6v5-mss";
+ interrupts = <0 24 1>;
+ };
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index e2e05b2..2dd4b30 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -46,7 +46,7 @@
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-# CONFIG_SMP_ON_UP is not set
+CONFIG_SMP=y
CONFIG_ARM_ARCH_TIMER=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 0070e22..a721f30 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -37,7 +37,6 @@
CONFIG_EFI_PARTITION=y
CONFIG_ARCH_MSM=y
CONFIG_ARCH_MSM8974=y
-CONFIG_ARCH_MSM8226=y
CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
# CONFIG_MSM_STACKED_MEMORY is not set
CONFIG_CPU_HAS_L2_PMU=y
@@ -75,6 +74,7 @@
CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
@@ -217,6 +217,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
CONFIG_QSEECOM=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 33400ea..973eef9 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -219,6 +219,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
CONFIG_QSEECOM=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index b9add04..1a8bbfc 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -42,6 +42,9 @@
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_MSM_DLOAD_MODE=y
@@ -125,6 +128,7 @@
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
+CONFIG_MSM_BUS_SCALING=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
@@ -164,6 +168,7 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_IPA=y
CONFIG_SPS=y
CONFIG_USB_BAM=y
CONFIG_SPS_SUPPORT_BAMDMA=y
@@ -251,3 +256,9 @@
CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_MSM_ADSP_LOADER=m
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index a0868c7..49eb544 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -279,6 +279,7 @@
select MEMORY_HOLE_CARVEOUT
select MSM_RPM_STATS_LOG
select QMI_ENCDEC
+ select DONT_MAP_HOLE_AFTER_MEMBANK0
config ARCH_MPQ8092
bool "MPQ8092"
@@ -369,7 +370,6 @@
bool "MSM8910"
select ARM_GIC
select GIC_SECURE
- select SMP
select ARCH_MSM_CORTEXMP
select CPU_V7
select MSM_SCM if SMP
@@ -385,7 +385,6 @@
bool "MSM8226"
select ARM_GIC
select GIC_SECURE
- select SMP
select ARCH_MSM_CORTEXMP
select CPU_V7
select MSM_SCM if SMP
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index b9d0527..10c4d6c 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -977,7 +977,11 @@
/* Fall through. */
case CPU_UP_CANCELED:
acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
+
+ regulator_disable(sc->vreg[VREG_CORE].reg);
regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
+ regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
+ sc->vreg[VREG_CORE].max_vdd);
break;
case CPU_UP_PREPARE:
if (!sc->initialized) {
@@ -988,10 +992,20 @@
}
if (WARN_ON(!prev_khz[cpu]))
return NOTIFY_BAD;
+
+ rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ sc->vreg[VREG_CORE].cur_vdd,
+ sc->vreg[VREG_CORE].max_vdd);
+ if (rc < 0)
+ return NOTIFY_BAD;
rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
sc->vreg[VREG_CORE].cur_ua);
if (rc < 0)
return NOTIFY_BAD;
+ rc = regulator_enable(sc->vreg[VREG_CORE].reg);
+ if (rc < 0)
+ return NOTIFY_BAD;
+
acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
break;
default:
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 851f7d9..a66495d 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -455,7 +455,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index e5263c7..6b0cd22 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -63,6 +63,12 @@
CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
CLK_DUMMY("iface_clk", HSUSB_IFACE_CLK, "f9a55000.usb", OFF),
CLK_DUMMY("core_clk", HSUSB_CORE_CLK, "f9a55000.usb", OFF),
+ CLK_DUMMY("iface_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("core_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("bus_clk", NULL, "msm_sdcc.1", OFF),
+ CLK_DUMMY("iface_clk", NULL, "msm_sdcc.2", OFF),
+ CLK_DUMMY("core_clk", NULL, "msm_sdcc.2", OFF),
+ CLK_DUMMY("bus_clk", NULL, "msm_sdcc.2", OFF),
};
static struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -70,6 +76,14 @@
.size = ARRAY_SIZE(msm_clocks_dummy),
};
+static struct of_dev_auxdata msm8226_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
+ "msm_sdcc.1", NULL),
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
+ "msm_sdcc.2", NULL),
+ {}
+};
+
static struct reserve_info msm8226_reserve_info __initdata = {
.memtype_reserve_table = msm8226_reserve_table,
.paddr_to_memtype = msm8226_paddr_to_memtype,
@@ -89,13 +103,16 @@
void __init msm8226_init(void)
{
+ struct of_dev_auxdata *adata = msm8226_auxdata_lookup;
+
msm8226_init_gpiomux();
+
msm_clock_init(&msm_dummy_clock_init_data);
if (socinfo_init() < 0)
pr_err("%s: socinfo_init() failed\n", __func__);
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
}
static const char *msm8226_dt_match[] __initconst = {
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index 947697a..eaebea0 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -449,7 +449,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index 3ee052b..9a2967a 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -487,7 +487,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index f9e2c8e..397411d 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -382,7 +382,8 @@
{ \
.constraints = { \
.name = _name, \
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
+ REGULATOR_CHANGE_STATUS, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
}, \
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index b092a53..c47b688 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -71,10 +71,17 @@
.paddr_to_memtype = msm8974_paddr_to_memtype,
};
-static void __init msm8974_early_memory(void)
+void __init msm_8974_reserve(void)
{
reserve_info = &msm8974_reserve_info;
of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
+ msm_reserve();
+}
+
+static void __init msm8974_early_memory(void)
+{
+ reserve_info = &msm8974_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
}
#define BIMC_BASE 0xfc380000
@@ -389,7 +396,7 @@
.handle_irq = gic_handle_irq,
.timer = &msm_dt_timer,
.dt_compat = msm8974_dt_match,
- .reserve = msm_reserve,
+ .reserve = msm_8974_reserve,
.init_very_early = msm8974_init_very_early,
.restart = msm_restart,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 35b68b1..39060ad 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -925,6 +925,9 @@
&msm_cpudai_sec_auxpcm_rx,
&msm_cpudai_sec_auxpcm_tx,
&msm_cpudai_stub,
+ &msm_cpudai_incall_music_rx,
+ &msm_cpudai_incall_record_rx,
+ &msm_cpudai_incall_record_tx,
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 33ec10a..b284168 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -472,7 +472,6 @@
},
.base = &virt_bases[APCS_PLL_BASE],
.c = {
- .parent = &cxo_clk_src.c,
.dbg_name = "apcspll_clk_src",
.ops = &clk_ops_local_pll,
CLK_INIT(apcspll_clk_src.c),
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 3888a4e..e55e9a7 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -562,6 +562,21 @@
.id = -1,
};
+struct platform_device msm_cpudai_incall_music_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8005,
+};
+
+struct platform_device msm_cpudai_incall_record_rx = {
+ .name = "msm-dai-q6",
+ .id = 0x8004,
+};
+
+struct platform_device msm_cpudai_incall_record_tx = {
+ .name = "msm-dai-q6",
+ .id = 0x8003,
+};
+
struct platform_device msm_i2s_cpudai0 = {
.name = "msm-dai-q6",
.id = PRIMARY_I2S_RX,
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
new file mode 100644
index 0000000..0f689ac
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -0,0 +1,458 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/sps.h>
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+ IPA_BYPASS_NAT,
+ IPA_SRC_NAT,
+ IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ */
+enum ipa_mode_type {
+ IPA_BASIC,
+ IPA_ENABLE_FRAMING_HDLC,
+ IPA_ENABLE_DEFRAMING_HDLC,
+ IPA_DMA,
+};
+
+/**
+ * enum ipa_aggr_en_type - aggregation setting type in IPA
+ * end-point
+ */
+enum ipa_aggr_en_type {
+ IPA_BYPASS_AGGR,
+ IPA_ENABLE_AGGR,
+ IPA_ENABLE_DEAGGR,
+};
+
+/**
+ * enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+ IPA_MBIM_16,
+ IPA_MBIM_32,
+ IPA_TLP,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+ IPA_MBIM,
+ IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+ IPA_RECEIVE,
+ IPA_WRITE_DONE,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en: This defines the default NAT mode for the pipe: in case of
+ * filter miss - the default NAT mode defines the NATing operation
+ * on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+ enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ * @hdr_len: Header length in bytes to be added/removed. Assuming header len
+ * is constant per endpoint. Valid for both Input and Output Pipes
+ * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no
+ * metadata within header.
+ * 1: Metadata_Ofst value is valid, i.e., metadata
+ * within header is at offset Metadata_Ofst. Valid
+ * for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_ofst_metadata: Offset within header in which metadata resides
+ * Size of metadata - 4 bytes
+ * Example - Stream ID/SSID/mux ID.
+ * Valid for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_additional_const_len: Defines the constant length that should be added
+ * to the payload length in order for IPA to update
+ * correctly the length field within the header
+ * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no
+ * length field within the inserted header
+ * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a
+ * packet length field resides within the header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size: Offset within header in which the packet size resides.
+ * Upon Header Insertion, IPA will update this field within the
+ * header with the packet length. The assumption is that the
+ * header length field size is constant and is 2 bytes.
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet.
+ * This bit is valid only when Hdr_En=01(Header Insertion)
+ * SW should set this bit for IPA-to-A5 pipes.
+ * 0: Do not insert A5 Mux Header
+ * 1: Insert A5 Mux Header
+ * Valid for Output Pipes (IPA Producer)
+ */
+struct ipa_ep_cfg_hdr {
+ u32 hdr_len;
+ u32 hdr_ofst_metadata_valid;
+ u32 hdr_ofst_metadata;
+ u32 hdr_additional_const_len;
+ u32 hdr_ofst_pkt_size_valid;
+ u32 hdr_ofst_pkt_size;
+ u32 hdr_a5_mux;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode: Valid for Input Pipes only (IPA Consumer)
+ * @dst: This parameter specifies the output pipe to which the packets
+ * will be routed to.
+ * This parameter is valid for Mode=DMA and not valid for
+ * Mode=Basic
+ * Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+ enum ipa_mode_type mode;
+ enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ * @aggr_en: Valid for both Input and Output Pipes
+ * @aggr: Valid for both Input and Output Pipes
+ * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB). When set
+ * to 0, there is no size limitation on the aggregation.
+ * When both Aggr_Byte_Limit and Aggr_Time_Limit are set
+ * to 0, there is no aggregation; every packet is sent
+ * independently according to the aggregation structure
+ * Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit: Timer to close aggregated packet (<=32ms). When set to 0,
+ * there is no time limitation on the aggregation. When
+ * both Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ * there is no aggregation; every packet is sent
+ * independently according to the aggregation structure
+ * Valid for Output Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_aggr {
+ enum ipa_aggr_en_type aggr_en;
+ enum ipa_aggr_type aggr;
+ u32 aggr_byte_limit;
+ u32 aggr_time_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl: Defines the default routing table index to be used in case there
+ * is no filter rule matching, valid for Input Pipes only (IPA
+ * Consumer). Clients should set this to 0 which will cause default
+ * v4 and v6 routes setup internally by IPA driver to be used for
+ * this end-point
+ */
+struct ipa_ep_cfg_route {
+ u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat: NAT parameters
+ * @hdr: Header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @route: Routing parameters
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_route route;
+};
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * the client allocates the data and desc FIFOs and specifies them in data+desc,
+ * OR it specifies their sizes and a pipe_mem preference and IPA allocates them.
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @client_bam_hdl: client SPS handle
+ * @client_ep_idx: client PER EP index
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie; evt - type of event;
+ * data - data relevant to event. May not be valid. See the
+ * event_type enum for valid cases.
+ * @desc_fifo_sz: size of desc FIFO
+ * @data_fifo_sz: size of data FIFO
+ * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback
+ * to sys mem if pipe mem alloc fails
+ * @desc: desc FIFO meta-data when client has allocated it
+ * @data: data FIFO meta-data when client has allocated it
+ */
+struct ipa_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 client_bam_hdl;
+ u32 client_ep_idx;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ u32 desc_fifo_sz;
+ u32 data_fifo_sz;
+ bool pipe_mem_preferred;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_sps_params - SPS related output parameters resulting from
+ * low/high level client connect
+ * @ipa_bam_hdl: IPA SPS handle
+ * @ipa_ep_idx: IPA PER EP index
+ * @desc: desc FIFO meta-data
+ * @data: data FIFO meta-data
+ */
+struct ipa_sps_params {
+ u32 ipa_bam_hdl;
+ u32 ipa_ep_idx;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props: number of tx properties
+ * @prop: the tx properties array
+ */
+struct ipa_tx_intf {
+ u32 num_props;
+ struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props: number of rx properties
+ * @prop: the rx properties array
+ */
+struct ipa_rx_intf {
+ u32 num_props;
+ struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to set up an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ */
+struct ipa_sys_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 desc_fifo_sz;
+ void *priv;
+ void (*notify)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+};
+
+/**
+ * struct ipa_msg_meta_wrapper - message meta-data wrapper
+ * @meta: the meta-data itself
+ * @link: opaque to client
+ * @meta_wrapper_free: function to free the metadata wrapper when IPA driver
+ * is done with it
+ */
+struct ipa_msg_meta_wrapper {
+ struct ipa_msg_meta meta;
+ struct list_head link;
+ void (*meta_wrapper_free)(struct ipa_msg_meta_wrapper *buff);
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @mbim_stream_id: the stream ID used in NDP signature
+ * @mbim_stream_id_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+ u8 mbim_stream_id;
+ bool mbim_stream_id_valid;
+};
+
+/**
+ * struct ipa_msg_wrapper - message wrapper
+ * @msg: the message buffer itself, MUST exist after call returns, will
+ * be freed by IPA driver when it is done with it
+ * @link: opaque to client
+ * @msg_free: function to free the message when IPA driver is done with it
+ * @msg_wrapper_free: function to free the message wrapper when IPA driver is
+ * done with it
+ */
+struct ipa_msg_wrapper {
+ void *msg;
+ struct list_head link;
+ void (*msg_free)(void *msg);
+ void (*msg_wrapper_free)(struct ipa_msg_wrapper *buff);
+};
+
+/**
+ * typedef ipa_pull_fn - callback function
+ * @buf - [in] the buffer to populate the message into
+ * @sz - [in] the size of the buffer
+ *
+ * callback function registered by a kernel client with the IPA driver so that
+ * the IPA driver can pull messages from the kernel client asynchronously.
+ *
+ * Returns how many bytes were copied into the buffer, negative on failure.
+ */
+typedef int (*ipa_pull_fn)(void *buf, uint16_t sz);
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * rmnet bridge
+ */
+int rmnet_bridge_init(void);
+
+int rmnet_bridge_disconnect(void);
+
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+#endif /* _IPA_H_ */
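
For orientation, a minimal usage sketch of the connect flow declared above.
This is not code from the patch: the helper name, FIFO sizes, and pipe-memory
preference are illustrative assumptions; only the ipa_* types and calls come
from this header.

	#include <linux/string.h>
	#include <mach/ipa.h>

	/* Hypothetical client helper: connect one client-owned BAM pipe to
	 * IPA, letting IPA allocate the FIFOs (preferring pipe memory and
	 * falling back to system memory).
	 */
	static int example_ipa_connect(enum ipa_client_type client,
				       u32 client_bam_hdl, u32 client_ep_idx,
				       void *priv,
				       void (*notify)(void *priv,
						      enum ipa_dp_evt_type evt,
						      unsigned long data),
				       u32 *clnt_hdl)
	{
		struct ipa_connect_params in;
		struct ipa_sps_params sps;
		int ret;

		memset(&in, 0, sizeof(in));
		in.client = client;
		in.client_bam_hdl = client_bam_hdl;
		in.client_ep_idx = client_ep_idx;
		in.priv = priv;
		in.notify = notify;
		in.desc_fifo_sz = 0x800;	/* illustrative sizes */
		in.data_fifo_sz = 0x2000;
		in.pipe_mem_preferred = true;

		ret = ipa_connect(&in, &sps, clnt_hdl);
		if (ret)
			return ret;

		/* On success, sps holds the IPA-side BAM handle, EP index and
		 * FIFO meta-data needed to program the client end; *clnt_hdl
		 * is later passed to ipa_cfg_ep() / ipa_disconnect().
		 */
		return 0;
	}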
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index acfbe4a..d089924 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -98,26 +98,25 @@
#define finish_arch_switch(prev) do { store_ttbr0(); } while (0)
#endif
+#define MAX_HOLE_ADDRESS (PHYS_OFFSET + 0x10000000)
+extern unsigned long memory_hole_offset;
+extern unsigned long memory_hole_start;
+extern unsigned long memory_hole_end;
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-extern unsigned long membank0_size;
-extern unsigned long membank1_start;
-void find_membank0_hole(void);
+void find_memory_hole(void);
-#define MEMBANK0_PHYS_OFFSET PHYS_OFFSET
-#define MEMBANK0_PAGE_OFFSET PAGE_OFFSET
-
-#define MEMBANK1_PHYS_OFFSET (membank1_start)
-#define MEMBANK1_PAGE_OFFSET (MEMBANK0_PAGE_OFFSET + (membank0_size))
+#define MEM_HOLE_END_PHYS_OFFSET (memory_hole_end)
+#define MEM_HOLE_PAGE_OFFSET (PAGE_OFFSET + memory_hole_offset)
#define __phys_to_virt(phys) \
- ((MEMBANK1_PHYS_OFFSET && ((phys) >= MEMBANK1_PHYS_OFFSET)) ? \
- (phys) - MEMBANK1_PHYS_OFFSET + MEMBANK1_PAGE_OFFSET : \
- (phys) - MEMBANK0_PHYS_OFFSET + MEMBANK0_PAGE_OFFSET)
+ ((MEM_HOLE_END_PHYS_OFFSET && ((phys) >= MEM_HOLE_END_PHYS_OFFSET)) ? \
+ (phys) - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET : \
+ (phys) - PHYS_OFFSET + PAGE_OFFSET)
#define __virt_to_phys(virt) \
- ((MEMBANK1_PHYS_OFFSET && ((virt) >= MEMBANK1_PAGE_OFFSET)) ? \
- (virt) - MEMBANK1_PAGE_OFFSET + MEMBANK1_PHYS_OFFSET : \
- (virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
+ ((MEM_HOLE_END_PHYS_OFFSET && ((virt) >= MEM_HOLE_PAGE_OFFSET)) ? \
+ (virt) - MEM_HOLE_PAGE_OFFSET + MEM_HOLE_END_PHYS_OFFSET : \
+ (virt) - PAGE_OFFSET + PHYS_OFFSET)
#endif
/*
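
To make the hole-aware translation concrete, here is a worked example under
assumed values. The constants are illustrative (PHYS_OFFSET and PAGE_OFFSET
vary by target); the hole matches the 0x7f00000/0x8000000 qcom,msm-mem-hole
carve-out this series adds for 8974.

	/* Assumed layout, for illustration only:
	 *   PHYS_OFFSET        = 0x00000000
	 *   PAGE_OFFSET        = 0xC0000000
	 *   memory_hole_offset = 0x07F00000  (hole start - PHYS_OFFSET)
	 *   memory_hole_end    = 0x0FF00000  (hole start + 0x8000000)
	 *
	 * Below the hole, the ordinary linear mapping applies:
	 *   __phys_to_virt(0x01000000) = 0x01000000 - PHYS_OFFSET + PAGE_OFFSET
	 *                              = 0xC1000000
	 *
	 * At and above the hole end, the second branch subtracts the hole:
	 *   __phys_to_virt(0x0FF00000) = 0x0FF00000 - memory_hole_end
	 *                                + PAGE_OFFSET + memory_hole_offset
	 *                              = 0xC7F00000
	 *
	 * which is exactly one byte past __phys_to_virt(0x07EFFFFF) =
	 * 0xC7EFFFFF, so the kernel's virtual view of RAM stays contiguous
	 * across the unmapped hole.
	 */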
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
index c03b513..bf44ca2 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
@@ -37,7 +37,7 @@
#define MSM8226_TLMM_PHYS 0xFD510000
#define MSM8226_TLMM_SIZE SZ_16K
-#define MSM8226_IMEM_PHYS 0xFC42B000
+#define MSM8226_IMEM_PHYS 0xFE805000
#define MSM8226_IMEM_SIZE SZ_4K
#ifdef CONFIG_DEBUG_MSM8226_UART
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 5ca5861..80e454a 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -68,6 +68,8 @@
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
int depth, void *data);
-
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+ int depth, void *data);
+void adjust_meminfo(unsigned long start, unsigned long size);
unsigned long __init reserve_memory_for_fmem(unsigned long, unsigned long);
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 11d3014..ab5271f 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2009, 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,10 @@
void (*gpio_release)(void);
int (*dma_config)(void);
const char *rsl_id;
- uint32_t pm_lat;
- uint32_t infinite_mode;
+ u32 pm_lat;
+ u32 infinite_mode;
+ bool ver_reg_exists;
+ bool use_bam;
+ u32 bam_consumer_pipe_index;
+ u32 bam_producer_pipe_index;
};
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 34bdc79..0499a7a 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -111,6 +111,7 @@
MSM_CPU_8092,
MSM_CPU_8226,
MSM_CPU_8910,
+ MSM_CPU_8625Q,
};
enum pmic_model {
@@ -447,6 +448,18 @@
#endif
}
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
static inline int soc_class_is_msm8960(void)
{
return cpu_is_msm8960() || cpu_is_msm8960ab();
diff --git a/arch/arm/mach-msm/include/mach/usbdiag.h b/arch/arm/mach-msm/include/mach/usbdiag.h
index d1e3605..4d0f63a 100644
--- a/arch/arm/mach-msm/include/mach/usbdiag.h
+++ b/arch/arm/mach-msm/include/mach/usbdiag.h
@@ -1,6 +1,6 @@
/* include/asm-arm/arch-msm/usbdiag.h
*
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2010, 2012, The Linux Foundation. All rights reserved.
*
* All source code in this file is licensed under the following license except
* where indicated.
@@ -23,6 +23,7 @@
#define DIAG_LEGACY "diag"
#define DIAG_MDM "diag_mdm"
+#define DIAG_QSC "diag_qsc"
#define USB_DIAG_CONNECT 0
#define USB_DIAG_DISCONNECT 1
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 3fe65b8..9cc2a9d 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -27,6 +27,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
+#include <mach/memory.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
@@ -365,7 +366,7 @@
return ret;
}
-static int check_for_compat(unsigned long node)
+static int __init check_for_compat(unsigned long node)
{
char **start = __compat_exports_start;
@@ -454,6 +455,79 @@
return 0;
}
+/* This function scans the device tree to populate the memory hole table */
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ unsigned int *memory_remove_prop;
+ unsigned long memory_remove_prop_length;
+ unsigned long hole_start;
+ unsigned long hole_size;
+
+ memory_remove_prop = of_get_flat_dt_prop(node,
+ "qcom,memblock-remove",
+ &memory_remove_prop_length);
+
+ if (memory_remove_prop) {
+ if (!check_for_compat(node))
+ goto out;
+ } else {
+ goto out;
+ }
+
+ if (memory_remove_prop) {
+ if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+ WARN(1, "Memory remove malformed\n");
+ goto out;
+ }
+
+ hole_start = be32_to_cpu(memory_remove_prop[0]);
+ hole_size = be32_to_cpu(memory_remove_prop[1]);
+
+ if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
+ if (memory_hole_start == 0 && memory_hole_end == 0) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start + hole_size;
+ } else if ((memory_hole_end - memory_hole_start)
+ <= hole_size) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start + hole_size;
+ }
+ }
+ adjust_meminfo(hole_start, hole_size);
+ }
+
+out:
+ return 0;
+}
+
+/*
+ * Split the memory bank to reflect the hole, if present,
+ * using the start and end of the memory hole.
+ */
+void adjust_meminfo(unsigned long start, unsigned long size)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[j];
+ *bank = meminfo.bank[i];
+
+ if (((start + size) <= (bank->start + bank->size)) &&
+ (start >= bank->start)) {
+ memmove(bank + 1, bank,
+ (meminfo.nr_banks - i) * sizeof(*bank));
+ meminfo.nr_banks++;
+ i++;
+ bank[1].size -= (start + size);
+ bank[1].start = (start + size);
+ bank[1].highmem = 0;
+ j++;
+ bank->size = start - bank->start;
+ }
+ j++;
+ }
+}
unsigned long get_ddr_size(void)
{
unsigned int i;
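
The adjust_meminfo() change splits whichever membank contains the carve-out into two banks, one ending at the hole and one starting after it. A simplified, self-contained sketch of that split for a single bank (the kernel version shifts the whole meminfo array with memmove() instead):

    #include <stdio.h>

    struct bank { unsigned long start, size; };

    /* Split "in" around [hole_start, hole_start + hole_size); returns the
     * number of banks written to out[] (1 if the hole misses the bank). */
    static int split_bank(struct bank in, unsigned long hole_start,
                          unsigned long hole_size, struct bank out[2])
    {
        unsigned long hole_end = hole_start + hole_size;

        if (hole_start < in.start || hole_end > in.start + in.size) {
            out[0] = in;
            return 1;
        }
        out[0].start = in.start;
        out[0].size  = hole_start - in.start;
        out[1].start = hole_end;
        out[1].size  = in.start + in.size - hole_end;
        return 2;
    }

    int main(void)
    {
        struct bank out[2];
        struct bank b = { 0x80000000UL, 0x20000000UL }; /* 512 MB bank */
        int i, n = split_bank(b, 0x88000000UL, 0x00800000UL, out);

        for (i = 0; i < n; i++)
            printf("bank %d: start=0x%lx size=0x%lx\n",
                   i, out[i].start, out[i].size);
        return 0;
    }
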
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index e0ab983..ea17efe 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -485,7 +485,7 @@
};
#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000220)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
enum bimc_m_priolvl_override {
M_PRIOLVL_OVERRIDE_RMSK = 0x301,
M_PRIOLVL_OVERRIDE_BMSK = 0x300,
@@ -495,10 +495,10 @@
};
#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
enum bimc_m_read_command_override {
- M_RD_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x300000,
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
@@ -529,13 +529,15 @@
};
#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
- (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
enum bimc_m_write_command_override {
- M_WR_CMD_OVERRIDE_RMSK = 0x37f3f,
- M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x30000,
- M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x10,
- M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x7000,
- M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
@@ -544,8 +546,10 @@
M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x20,
- M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
@@ -1454,7 +1458,7 @@
* boundary in future
*/
wmb();
- set_qos_mode(binfo->base, mas_index, 1, 1, 1);
+ set_qos_mode(binfo->base, mas_index, 0, 1, 1);
break;
case BIMC_QOS_MODE_BYPASS:
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
index f0f5cd8..5c20a4e 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -806,7 +806,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_oxili,
@@ -819,7 +819,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.qport = qports_gemini,
.ws = 10000,
@@ -832,7 +832,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.qport = qports_mdp,
.ws = 10000,
@@ -845,7 +845,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_venus_p0,
@@ -858,7 +858,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_venus_p1,
@@ -871,7 +871,7 @@
.tier = tier2,
.num_tiers = ARRAY_SIZE(tier2),
.hw_sel = MSM_BUS_NOC,
- .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .perm_mode = NOC_QOS_PERM_MODE_BYPASS,
.mode = NOC_QOS_MODE_BYPASS,
.ws = 10000,
.qport = qports_vfe,
@@ -1049,9 +1049,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
- .prio_lvl = 0,
- .prio_rd = 2,
- .prio_wr = 2,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_AMPSS_M1,
@@ -1064,6 +1063,8 @@
.qport = qports_kmpss,
.ws = 10000,
.mas_hw_id = MAS_APPSS_PROC,
+ .prio_rd = 1,
+ .prio_wr = 1,
},
{
.id = MSM_BUS_MASTER_MSS_PROC,
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index fb2e5da..9e89256 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -362,16 +362,18 @@
}
for (i = 0; i < info->node_info->num_mports; i++) {
- if (info->node_info->mode != NOC_QOS_MODE_BYPASS)
+ if (info->node_info->mode != NOC_QOS_MODE_BYPASS) {
noc_set_qos_priority(ninfo, info->node_info->qport[i],
prio);
- if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
- struct msm_bus_noc_qos_bw qbw;
- qbw.ws = info->node_info->ws;
- qbw.bw = 0;
- msm_bus_noc_set_qos_bw(ninfo, info->node_info->qport[i],
- info->node_info->perm_mode, &qbw);
+ if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
+ struct msm_bus_noc_qos_bw qbw;
+ qbw.ws = info->node_info->ws;
+ qbw.bw = 0;
+ msm_bus_noc_set_qos_bw(ninfo, info->node_info->
+ qport[i], info->node_info->perm_mode,
+ &qbw);
+ }
}
noc_set_qos_mode(ninfo, info->node_info->qport[i], info->
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 7829d8d..34fd8d2 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -395,7 +395,8 @@
pr_debug("ocmem: Disabled br clock\n");
}
-static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
+static struct ocmem_plat_data * __devinit parse_dt_config
+ (struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 6ae7544..6e8d127 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -303,6 +303,7 @@
return IRQ_HANDLED;
}
+ disable_irq_nosync(drv->irq);
drv->restart_inprogress = true;
restart_wcnss(drv);
@@ -324,7 +325,6 @@
pil_shutdown(&drv->desc);
flush_delayed_work(&drv->cancel_vote_work);
wcnss_flush_delayed_boot_votes();
- disable_irq_nosync(drv->irq);
return 0;
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 7652d74..07cbe19 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -423,7 +423,7 @@
struct mba_data *drv = subsys_to_drv(subsys);
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
/* MBA doesn't support shutdown */
pil_shutdown(&drv->q6->desc);
return 0;
@@ -435,7 +435,7 @@
int ret;
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
/*
* At this time, the modem is shutdown. Therefore this function cannot
* run concurrently with either the watchdog bite error handler or the
@@ -527,7 +527,7 @@
struct mba_data *drv = subsys_to_drv(desc);
if (!drv->is_loadable)
- return -ENODEV;
+ return 0;
ret = pil_boot(&drv->q6->desc);
if (ret)
@@ -729,8 +729,8 @@
return -ENOMEM;
platform_set_drvdata(pdev, drv);
- of_property_read_u32(pdev->dev.of_node, "qcom,is_loadable",
- &drv->is_loadable);
+ drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,is-loadable");
if (drv->is_loadable) {
ret = pil_mss_loadable_init(drv, pdev);
if (ret)
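
The pil-q6v5-mss.c probe change reads qcom,is-loadable with of_property_read_bool(), matching the binding update: boolean devicetree flags are encoded by presence, not by a <0>/<1> cell, and the read cannot fail. A minimal probe-side sketch of the pattern (the driver context is illustrative):

    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        /* True iff the node contains "qcom,is-loadable;"; absence of
         * the property simply reads as false. */
        bool is_loadable = of_property_read_bool(pdev->dev.of_node,
                                                 "qcom,is-loadable");

        dev_dbg(&pdev->dev, "is-loadable: %d\n", is_loadable);
        return 0;
    }
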
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 3b31b9f..0e75cae 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -121,10 +121,10 @@
write_pen_release(-1);
/* clear the IPC pending SPI */
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, cpu)) {
raise_clear_spi(cpu, false);
clear_pending_spi(cpu_data[cpu].ipc_irq);
- power_collapsed = 0;
+ per_cpu(power_collapsed, cpu) = 0;
}
/*
@@ -216,7 +216,7 @@
* GDFS which needs to be brought out by raising an SPI.
*/
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, cpu)) {
gic_configure_and_raise(cpu_data[cpu].ipc_irq, cpu);
raise_clear_spi(cpu, true);
} else {
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index faefe34..bd61feb 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/pm.h
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -27,7 +27,7 @@
#define msm_secondary_startup NULL
#endif
-extern int power_collapsed;
+DECLARE_PER_CPU(int, power_collapsed);
struct msm_pm_irq_calls {
unsigned int (*irq_pending)(void);
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index ae2a4bc..96c1218 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -3,7 +3,7 @@
* MSM Power Management Routines
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2012 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2012 The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -75,8 +75,9 @@
MSM_PM_DEBUG_HOTPLUG = BIT(7),
};
+DEFINE_PER_CPU(int, power_collapsed);
+
static int msm_pm_debug_mask;
-int power_collapsed;
module_param_named(
debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
@@ -565,7 +566,7 @@
__raw_writel(0, APPS_PWRDOWN);
mb();
- if (power_collapsed) {
+ if (per_cpu(power_collapsed, 1)) {
/*
* enable the SCU while coming out of power
* collapse.
@@ -983,6 +984,7 @@
* path by reading the MPA5_GDFS_CNT_VAL register.
*/
if (cpu_is_msm8625()) {
+ int cpu;
/*
* on system reset, default value of MPA5_GDFS_CNT_VAL
* is = 0x0, later modem reprogram this value to
@@ -997,7 +999,11 @@
/* 8x25Q */
if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
if (val != 0x000F0002) {
- power_collapsed = 1;
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+ per_cpu(power_collapsed, cpu) = 1;
+ }
/*
* override DBGNOPOWERDN and program the GDFS
* count val
@@ -1008,7 +1014,11 @@
modem_early_exit = 1;
} else {
if (val != 0x00030002) {
- power_collapsed = 1;
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+ per_cpu(power_collapsed, cpu) = 1;
+ }
/*
* override DBGNOPOWERDN and program the GDFS
* count val
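
power_collapsed moves from one global int to a per-CPU variable so each secondary core tracks its own GDFS power-collapse state independently. A minimal sketch of the per-CPU idiom the patch uses (the variable name matches the patch; the helper is illustrative):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    DEFINE_PER_CPU(int, power_collapsed);

    static void mark_secondaries_collapsed(void)
    {
        int cpu;

        /* Skip the boot CPU (cpu 0), as the pm2.c hunks above do. */
        for_each_possible_cpu(cpu) {
            if (!cpu)
                continue;
            per_cpu(power_collapsed, cpu) = 1;
        }
    }
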
diff --git a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
index c28e403..02dbece 100644
--- a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
+++ b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <mach/subsystem_restart.h>
#include <mach/qdsp6v2/apr.h>
+#include <linux/of_device.h>
#define Q6_PIL_GET_DELAY_MS 100
@@ -30,25 +31,41 @@
{
struct adsp_loader_private *priv;
int rc = 0;
+ const char *adsp_dt = "qcom,adsp-state";
+ u32 adsp_state;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
-
- priv->pil_h = subsystem_get("adsp");
- if (IS_ERR(priv->pil_h)) {
- pr_err("%s: pil get adsp failed, error:%d\n", __func__, rc);
- devm_kfree(&pdev->dev, priv);
- goto fail;
+ rc = of_property_read_u32(pdev->dev.of_node, adsp_dt, &adsp_state);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: ADSP state = %x\n", __func__, adsp_state);
+ return rc;
}
- /* Query the DSP to check if resources are available */
- msleep(Q6_PIL_GET_DELAY_MS);
+ if (adsp_state == APR_SUBSYS_DOWN) {
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /* Set the state of the ADSP in APR driver */
- apr_set_q6_state(APR_SUBSYS_LOADED);
+ platform_set_drvdata(pdev, priv);
+
+ priv->pil_h = subsystem_get("adsp");
+ if (IS_ERR(priv->pil_h)) {
+ pr_err("%s: pil get adsp failed, error:%d\n",
+ __func__, rc);
+ devm_kfree(&pdev->dev, priv);
+ goto fail;
+ }
+
+ /* Query the DSP to check if resources are available */
+ msleep(Q6_PIL_GET_DELAY_MS);
+
+ /* Set the state of the ADSP in APR driver */
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ } else if (adsp_state == APR_SUBSYS_LOADED) {
+ dev_dbg(&pdev->dev,
+ "%s:MDM9x25 ADSP state = %x\n", __func__, adsp_state);
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ }
/* Query for MMPM API */
@@ -62,7 +79,8 @@
struct adsp_loader_private *priv;
priv = platform_get_drvdata(pdev);
- subsystem_put(priv->pil_h);
+ if (priv != NULL)
+ subsystem_put(priv->pil_h);
pr_info("%s: Q6/ADSP image is unloaded\n", __func__);
return 0;
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index d1c61fe..bb33283 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -659,19 +659,6 @@
return uV;
}
-static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct rpm_regulator *reg = rdev_get_drvdata(rdev);
- int uV = 0;
-
- if (selector == 0)
- uV = reg->min_uV;
- else if (selector == 1)
- uV = reg->max_uV;
-
- return uV;
-}
-
static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
@@ -1030,7 +1017,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1043,7 +1029,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1056,7 +1041,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1069,7 +1053,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage_corner,
.get_voltage = rpm_vreg_get_voltage_corner,
- .list_voltage = rpm_vreg_list_voltage,
.set_mode = rpm_vreg_set_mode,
.get_mode = rpm_vreg_get_mode,
.get_optimum_mode = rpm_vreg_get_optimum_mode,
@@ -1089,7 +1072,6 @@
.is_enabled = rpm_vreg_is_enabled,
.set_voltage = rpm_vreg_set_voltage,
.get_voltage = rpm_vreg_get_voltage,
- .list_voltage = rpm_vreg_list_voltage,
.enable_time = rpm_vreg_enable_time,
};
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 764fbeb..b6fb88c 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -152,6 +152,8 @@
LIST_HEAD(msm_rpm_ack_list);
+static DECLARE_COMPLETION(data_ready);
+
static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
struct msm_rpm_kvp_data *kvp)
{
@@ -340,7 +342,7 @@
switch (event) {
case SMD_EVENT_DATA:
- queue_work(msm_rpm_smd_wq, &pdata->work);
+ complete(&data_ready);
break;
case SMD_EVENT_OPEN:
complete(&pdata->smd_open);
@@ -530,17 +532,19 @@
int errno;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
- if (!spin_trylock(&msm_rpm_data.smd_lock_read))
- return;
- while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
- if (msm_rpm_read_smd_data(buf)) {
- break;
+ while (1) {
+ wait_for_completion(&data_ready);
+
+ spin_lock(&msm_rpm_data.smd_lock_read);
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ if (msm_rpm_read_smd_data(buf))
+ break;
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
}
- msg_id = msm_rpm_get_msg_id_from_ack(buf);
- errno = msm_rpm_get_error_from_ack(buf);
- msm_rpm_process_ack(msg_id, errno);
+ spin_unlock(&msm_rpm_data.smd_lock_read);
}
- spin_unlock(&msm_rpm_data.smd_lock_read);
}
#define DEBUG_PRINT_BUFFER_SIZE 512
@@ -892,6 +896,9 @@
msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+ complete(&data_ready);
return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
@@ -1013,6 +1020,7 @@
msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
if (!msm_rpm_smd_wq)
return -EINVAL;
+ queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
}
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
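
The rpm-smd rework replaces "queue a work item per SMD event" with one long-running worker blocked on a completion: the SMD notifier just fires complete(), and the worker drains every pending packet under the read lock before sleeping again, so coalesced events cannot drop acks. A condensed sketch of that shape, assuming stub helpers in place of the smd_* calls:

    #include <linux/completion.h>
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    static DECLARE_COMPLETION(data_ready);
    static DEFINE_SPINLOCK(read_lock);

    static bool packet_available(void); /* stands in for smd_is_pkt_avail() */
    static void consume_packet(void);   /* read one ack and process it */

    /* Producer: called from the SMD notifier on SMD_EVENT_DATA. */
    static void on_data_event(void)
    {
        complete(&data_ready);
    }

    /* Consumer: queued once at init, then loops for the driver's lifetime. */
    static void smd_read_work_fn(struct work_struct *work)
    {
        while (1) {
            wait_for_completion(&data_ready);

            spin_lock(&read_lock);
            while (packet_available())
                consume_packet();
            spin_unlock(&read_lock);
        }
    }
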
diff --git a/arch/arm/mach-msm/saw-regulator.c b/arch/arm/mach-msm/saw-regulator.c
index 6762648..0a81a33 100644
--- a/arch/arm/mach-msm/saw-regulator.c
+++ b/arch/arm/mach-msm/saw-regulator.c
@@ -54,11 +54,17 @@
struct regulator_dev *rdev;
char *name;
int uV;
+ int last_set_uV;
+ unsigned vlevel;
+ bool online;
};
/* Minimum core operating voltage */
#define MIN_CORE_VOLTAGE 950000
+/* Specifies an uninitialized voltage */
+#define INVALID_VOLTAGE -1
+
/* Specifies the PMIC internal slew rate in uV/us. */
#define REGULATOR_SLEW_RATE 1250
@@ -69,12 +75,32 @@
return vreg->uV;
}
+static int _set_voltage(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = msm_spm_set_vdd(rdev_get_id(rdev), vreg->vlevel);
+ if (!rc) {
+ if (vreg->uV > vreg->last_set_uV) {
+ /* Wait for voltage to stabilize. */
+ udelay((vreg->uV - vreg->last_set_uV) /
+ REGULATOR_SLEW_RATE);
+ }
+ vreg->last_set_uV = vreg->uV;
+ } else {
+ pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
+ vreg->uV = vreg->last_set_uV;
+ }
+
+ return rc;
+}
+
static int saw_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
unsigned *selector)
{
struct saw_vreg *vreg = rdev_get_drvdata(rdev);
int uV = min_uV;
- int rc;
u8 vprog, band;
if (uV < FTSMPS_BAND1_UV_MIN && max_uV >= FTSMPS_BAND1_UV_MIN)
@@ -119,23 +145,51 @@
return -EINVAL;
}
- rc = msm_spm_set_vdd(rdev_get_id(rdev), band | vprog);
- if (!rc) {
- if (uV > vreg->uV) {
- /* Wait for voltage to stabalize. */
- udelay((uV - vreg->uV) / REGULATOR_SLEW_RATE);
- }
- vreg->uV = uV;
- } else {
- pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
- }
+ vreg->vlevel = band | vprog;
+ vreg->uV = uV;
+
+ if (!vreg->online)
+ return 0;
+
+ return _set_voltage(rdev);
+}
+
+static int saw_enable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ if (vreg->uV != vreg->last_set_uV)
+ rc = _set_voltage(rdev);
+
+ if (!rc)
+ vreg->online = true;
return rc;
}
+static int saw_disable(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ vreg->online = false;
+
+ return 0;
+}
+
+static int saw_is_enabled(struct regulator_dev *rdev)
+{
+ struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->online;
+}
+
static struct regulator_ops saw_ops = {
.get_voltage = saw_get_voltage,
.set_voltage = saw_set_voltage,
+ .enable = saw_enable,
+ .disable = saw_disable,
+ .is_enabled = saw_is_enabled,
};
static int __devinit saw_probe(struct platform_device *pdev)
@@ -168,12 +222,13 @@
goto free_vreg;
}
- vreg->desc.name = vreg->name;
- vreg->desc.id = pdev->id;
- vreg->desc.ops = &saw_ops;
- vreg->desc.type = REGULATOR_VOLTAGE;
- vreg->desc.owner = THIS_MODULE;
- vreg->uV = MIN_CORE_VOLTAGE;
+ vreg->desc.name = vreg->name;
+ vreg->desc.id = pdev->id;
+ vreg->desc.ops = &saw_ops;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->uV = INVALID_VOLTAGE;
+ vreg->last_set_uV = MIN_CORE_VOLTAGE;
vreg->rdev = regulator_register(&vreg->desc, &pdev->dev,
init_data, vreg, NULL);
@@ -233,5 +288,4 @@
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAW regulator driver");
-MODULE_VERSION("1.0");
MODULE_ALIAS("platform:saw-regulator");
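
The regulator rework caches the requested level in set_voltage() and only programs hardware while the regulator is online; enable() applies any deferred change, and last_set_uV records what the hardware actually holds so the slew delay is computed from the real transition. A small user-space sketch of that cache-then-apply pattern, with apply_vlevel() standing in for msm_spm_set_vdd():

    #include <stdbool.h>
    #include <stdio.h>

    struct vreg {
        int uV;          /* last requested voltage */
        int last_set_uV; /* voltage the hardware currently holds */
        bool online;
    };

    static int apply_vlevel(struct vreg *v)
    {
        /* Stand-in for msm_spm_set_vdd(); assume it succeeds. */
        printf("hw set to %d uV\n", v->uV);
        v->last_set_uV = v->uV;
        return 0;
    }

    static int set_voltage(struct vreg *v, int uV)
    {
        v->uV = uV;
        return v->online ? apply_vlevel(v) : 0; /* defer while offline */
    }

    static int enable(struct vreg *v)
    {
        int rc = (v->uV != v->last_set_uV) ? apply_vlevel(v) : 0;

        if (!rc)
            v->online = true;
        return rc;
    }

    int main(void)
    {
        struct vreg v = { .uV = -1, .last_set_uV = 950000 };

        set_voltage(&v, 1050000); /* cached only: regulator is offline */
        enable(&v);               /* deferred write happens here */
        return 0;
    }
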
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 6013efc..f4dae89 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -204,10 +204,13 @@
return ret;
}
-static u32 cacheline_size;
-
static void scm_inv_range(unsigned long start, unsigned long end)
{
+ u32 cacheline_size, ctr;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
start = round_down(start, cacheline_size);
end = round_up(end, cacheline_size);
outer_inv_range(start, end);
@@ -444,13 +447,3 @@
}
EXPORT_SYMBOL(scm_get_feat_version);
-static int scm_init(void)
-{
- u32 ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- return 0;
-}
-early_initcall(scm_init);
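
With the early_initcall gone, scm_inv_range() decodes the cache line size from CTR (the ARM Cache Type Register) on every call: bits [19:16], DminLine, hold log2 of the smallest D-cache line in words, so the size in bytes is 4 << DminLine. A tiny sketch of the decode with a sample register value (the mrc read itself is privileged, so a constant stands in for it here):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Sample CTR with DminLine (bits 19:16) = 4: the smallest
         * D-cache line is 2^4 words = 64 bytes. */
        uint32_t ctr = 0x84448003;
        uint32_t cacheline_size = 4u << ((ctr >> 16) & 0xf);

        printf("cacheline: %u bytes\n", cacheline_size); /* prints 64 */
        return 0;
    }
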
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 6cb9339..2743547 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -303,7 +303,13 @@
[154] = MSM_CPU_8930AB,
[155] = MSM_CPU_8930AB,
[156] = MSM_CPU_8930AB,
- [157] = MSM_CPU_8930AB
+ [157] = MSM_CPU_8930AB,
+
+ /* 8625Q IDs */
+ [168] = MSM_CPU_8625Q,
+ [169] = MSM_CPU_8625Q,
+ [170] = MSM_CPU_8625Q,
+
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e2cd0120..0ebc2b9 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -375,29 +375,61 @@
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
+unsigned long memory_hole_offset;
+EXPORT_SYMBOL(memory_hole_offset);
+unsigned long memory_hole_start;
+EXPORT_SYMBOL(memory_hole_start);
+unsigned long memory_hole_end;
+EXPORT_SYMBOL(memory_hole_end);
+
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-unsigned long membank0_size;
-EXPORT_SYMBOL(membank0_size);
-unsigned long membank1_start;
-EXPORT_SYMBOL(membank1_start);
-
-void __init find_membank0_hole(void)
+void find_memory_hole(void)
{
- sort(&meminfo.bank, meminfo.nr_banks,
- sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+ int i;
+ unsigned long hole_start;
+ unsigned long hole_size;
- membank0_size = meminfo.bank[0].size;
- membank1_start = meminfo.bank[1].start;
+ /*
+ * Find the start and end of the hole, using meminfo
+ * if it hasn't been found already.
+ */
+ if (memory_hole_start == 0 && memory_hole_end == 0) {
+ for (i = 0; i < (meminfo.nr_banks - 1); i++) {
+ if ((meminfo.bank[i].start + meminfo.bank[i].size) !=
+ meminfo.bank[i+1].start) {
+ if (meminfo.bank[i].start + meminfo.bank[i].size
+ <= MAX_HOLE_ADDRESS) {
+
+ hole_start = meminfo.bank[i].start +
+ meminfo.bank[i].size;
+ hole_size = meminfo.bank[i+1].start -
+ hole_start;
+
+ if (memory_hole_start == 0 &&
+ memory_hole_end == 0) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start +
+ hole_size;
+ } else if ((memory_hole_end -
+ memory_hole_start) <= hole_size) {
+ memory_hole_start = hole_start;
+ memory_hole_end = hole_start +
+ hole_size;
+ }
+ }
+ }
+ }
+ }
+ memory_hole_offset = memory_hole_start - PHYS_OFFSET;
}
+
#endif
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
-#ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-#endif
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1cb6cba..8575f78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -961,7 +961,7 @@
int i, j, highmem = 0;
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
- find_membank0_hole();
+ find_memory_hole();
#endif
#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index 8f8707f..91fcdfc 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -30,9 +30,9 @@
SDIO Transport Layer for DIAG Router
endmenu
-menu "HSIC support for DIAG"
+menu "HSIC/SMUX support for DIAG"
-config DIAG_BRIDGE_CODE
+config DIAGFWD_BRIDGE_CODE
depends on USB_QCOM_DIAG_BRIDGE
default y
bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
index 6ecc970..c9204ea 100644
--- a/drivers/char/diag/Makefile
+++ b/drivers/char/diag/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_DIAG_CHAR) := diagchar.o
obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_hsic.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_smux.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_hsic.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_smux.o
diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5cd5ce9..e78a2aa 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -134,33 +134,37 @@
pr_alert("diag: No matching PID for DCI data\n");
/* Using PID of client process, find client buffer */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (curr_client_pid == driver->dci_client_tbl[i].client->tgid) {
- /* copy pkt rsp in client buf */
- entry = &(driver->dci_client_tbl[i]);
- if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
- pr_alert("diag: create capacity for pkt rsp\n");
- entry->total_capacity += 8+write_len;
- temp_buf = krealloc(entry->dci_data,
- entry->total_capacity, GFP_KERNEL);
- if (!temp_buf) {
- pr_err("diag: DCI realloc failed\n");
- break;
- } else {
- entry->dci_data = temp_buf;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (curr_client_pid ==
+ driver->dci_client_tbl[i].client->tgid) {
+ /* copy pkt rsp in client buf */
+ entry = &(driver->dci_client_tbl[i]);
+ if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
+ pr_alert("diag: create capacity for pkt rsp\n");
+ entry->total_capacity += 8+write_len;
+ temp_buf = krealloc(entry->dci_data,
+ entry->total_capacity, GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err("diag: DCI realloc failed\n");
+ break;
+ } else {
+ entry->dci_data = temp_buf;
+ }
}
- }
- *(int *)(entry->dci_data+entry->data_len) =
+ *(int *)(entry->dci_data+entry->data_len) =
DCI_PKT_RSP_TYPE;
- entry->data_len += 4;
- *(int *)(entry->dci_data+entry->data_len) = write_len;
- entry->data_len += 4;
- memcpy(entry->dci_data+entry->data_len,
- buf+4+cmd_code_len, write_len);
- entry->data_len += write_len;
- /* delete immediate response entry */
- if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
- driver->req_tracking_tbl[index].pid = 0;
- break;
+ entry->data_len += 4;
+ *(int *)(entry->dci_data+entry->data_len)
+ = write_len;
+ entry->data_len += 4;
+ memcpy(entry->dci_data+entry->data_len,
+ buf+4+cmd_code_len, write_len);
+ entry->data_len += write_len;
+ /* delete immediate response entry */
+ if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
+ driver->req_tracking_tbl[index].pid = 0;
+ break;
+ }
}
}
}
@@ -408,6 +412,7 @@
int count, set_mask, num_codes, byte_index, bit_index, event_id;
uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
uint8_t *event_mask_ptr;
+ int offset = 0;
/* This is Pkt request/response transaction */
if (*(int *)temp > 0) {
@@ -463,10 +468,12 @@
} else if (*(int *)temp == DCI_LOG_TYPE) {
/* find client id and table */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client->tgid ==
- current->tgid) {
- found = 1;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (driver->dci_client_tbl[i].client->tgid ==
+ current->tgid) {
+ found = 1;
+ break;
+ }
}
}
if (!found) {
@@ -495,6 +502,7 @@
*/
log_mask_ptr = head_log_mask_ptr;
found = 0;
+ offset = 0;
while (log_mask_ptr) {
if (*log_mask_ptr == equip_id) {
found = 1;
@@ -505,6 +513,7 @@
pr_debug("diag: did not find equip id = %x at %p\n",
equip_id, log_mask_ptr);
log_mask_ptr += 514;
+ offset += 514;
}
}
if (!found) {
@@ -517,21 +526,25 @@
*log_mask_ptr |= byte_mask;
else
*log_mask_ptr &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_log_mask(
+ offset, byte_index,
+ byte_mask);
temp += 2;
count++;
ret = DIAG_DCI_NO_ERROR;
}
- /* add to cumulative mask */
- update_dci_cumulative_log_mask(i);
/* send updated mask to peripherals */
diag_send_dci_log_mask(driver->ch_cntl);
} else if (*(int *)temp == DCI_EVENT_TYPE) {
/* find client id and table */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client->tgid ==
- current->tgid) {
- found = 1;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ if (driver->dci_client_tbl[i].client->tgid ==
+ current->tgid) {
+ found = 1;
+ break;
+ }
}
}
if (!found) {
@@ -561,12 +574,12 @@
*(event_mask_ptr + byte_index) |= byte_mask;
else
*(event_mask_ptr + byte_index) &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_event_mask(byte_index, byte_mask);
temp += sizeof(int);
count++;
ret = DIAG_DCI_NO_ERROR;
}
- /* add to cumulative mask */
- update_dci_cumulative_event_mask(i);
/* send updated mask to peripherals */
diag_send_dci_event_mask(driver->ch_cntl);
} else {
@@ -575,16 +588,29 @@
return ret;
}
-void update_dci_cumulative_event_mask(int client_index)
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
{
int i;
- uint8_t *update_ptr = dci_cumulative_event_mask;
uint8_t *event_mask_ptr;
+ uint8_t *update_ptr = dci_cumulative_event_mask;
+ bool is_set = false;
mutex_lock(&dci_event_mask_mutex);
- event_mask_ptr = driver->dci_client_tbl[client_index].dci_event_mask;
- for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
- *(update_ptr+i) |= *(event_mask_ptr+i);
+ update_ptr += offset;
+ for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ event_mask_ptr =
+ driver->dci_client_tbl[i].dci_event_mask;
+ event_mask_ptr += offset;
+ if ((*event_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the event mask set */
+ break;
+ }
+ }
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
mutex_unlock(&dci_event_mask_mutex);
}
@@ -624,27 +650,39 @@
mutex_unlock(&driver->diag_cntl_mutex);
}
-void update_dci_cumulative_log_mask(int client_index)
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+ uint8_t byte_mask)
{
- int i, j;
+ int i;
uint8_t *update_ptr = dci_cumulative_log_mask;
- uint8_t *log_mask_ptr =
- driver->dci_client_tbl[client_index].dci_log_mask;
+ uint8_t *log_mask_ptr;
+ bool is_set = false;
mutex_lock(&dci_log_mask_mutex);
- *update_ptr = 0; /* add first equip id */
- /* skip the first equip id */
- update_ptr++; log_mask_ptr++;
- for (i = 0; i < 16; i++) {
- for (j = 0; j < 513; j++) {
- *update_ptr |= *log_mask_ptr;
- update_ptr++;
- log_mask_ptr++;
+ *update_ptr = 0;
+ /* set the equipment IDs */
+ for (i = 0; i < 16; i++)
+ *(update_ptr + (i*514)) = i;
+
+ update_ptr += offset;
+ /* update the dirty bit */
+ *(update_ptr+1) = 1;
+ update_ptr = update_ptr + byte_index;
+ for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ log_mask_ptr =
+ (driver->dci_client_tbl[i].dci_log_mask);
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ if ((*log_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the log mask set */
+ break;
}
- *update_ptr = i+1;
- update_ptr++;
- log_mask_ptr++;
}
+
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
mutex_unlock(&dci_log_mask_mutex);
}
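
The DCI mask rework replaces "OR this client's entire table into the cumulative mask" with a targeted recompute of one bit: after a client flips a bit, the code scans every client's mask at that offset and keeps the cumulative bit set only if some client still wants it, so bits can now be cleared as well as set. A compact user-space sketch of that recompute (table sizes are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CLIENTS 4
    #define MASK_BYTES  8

    static uint8_t client_mask[MAX_CLIENTS][MASK_BYTES];
    static uint8_t cumulative[MASK_BYTES];

    /* Recompute one bit of the cumulative mask after a client update:
     * set iff at least one client still has the bit set. */
    static void update_cumulative(int byte_index, uint8_t byte_mask)
    {
        bool is_set = false;
        int i;

        for (i = 0; i < MAX_CLIENTS; i++) {
            if ((client_mask[i][byte_index] & byte_mask) == byte_mask) {
                is_set = true;
                break; /* one client is enough */
            }
        }
        if (is_set)
            cumulative[byte_index] |= byte_mask;
        else
            cumulative[byte_index] &= ~byte_mask;
    }

    int main(void)
    {
        client_mask[1][2] = 0x10;
        update_cumulative(2, 0x10); /* bit comes on */
        client_mask[1][2] = 0;
        update_cumulative(2, 0x10); /* and goes off again */
        printf("cumulative[2] = 0x%02x\n", cumulative[2]); /* 0x00 */
        return 0;
    }
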
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index afcabcc..435c750 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -85,11 +85,12 @@
void extract_dci_pkt_rsp(unsigned char *buf);
/* DCI Log streaming functions */
void create_dci_log_mask_tbl(unsigned char *tbl_buf);
-void update_dci_cumulative_log_mask(int client_index);
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+ uint8_t byte_mask);
void diag_send_dci_log_mask(smd_channel_t *ch);
void extract_dci_log(unsigned char *buf);
/* DCI event streaming functions */
-void update_dci_cumulative_event_mask(int client_index);
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask);
void diag_send_dci_event_mask(smd_channel_t *ch);
void extract_dci_events(unsigned char *buf);
void create_dci_event_mask_tbl(unsigned char *tbl_buf);
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index ed0f08e..7863f74 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include "diagchar.h"
#include "diagfwd.h"
+#include "diagfwd_bridge.h"
#define DEBUG_BUF_SIZE 4096
static struct dentry *diag_dbgfs_dent;
@@ -195,8 +196,8 @@
return ret;
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-static ssize_t diag_dbgfs_read_hsic(struct file *file, char __user *ubuf,
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char *buf;
@@ -220,13 +221,17 @@
"count_hsic_write_pool: %d\n"
"diag_hsic_pool: %x\n"
"diag_hsic_write_pool: %x\n"
- "write_len_mdm: %d\n"
+ "HSIC write_len: %d\n"
"num_hsic_buf_tbl_entries: %d\n"
- "usb_mdm_connected: %d\n"
- "diag_read_mdm_work: %d\n"
+ "HSIC usb_connected: %d\n"
+ "HSIC diag_read_work: %d\n"
"diag_read_hsic_work: %d\n"
"diag_disconnect_work: %d\n"
- "diag_usb_read_complete_work: %d\n",
+ "diag_usb_read_complete_work: %d\n"
+ "smux ch: %d"
+ "smux enabled %d"
+ "smux in busy %d"
+ "smux connected %d",
driver->hsic_ch,
driver->hsic_inited,
driver->hsic_device_enabled,
@@ -238,13 +243,17 @@
driver->count_hsic_write_pool,
(unsigned int)driver->diag_hsic_pool,
(unsigned int)driver->diag_hsic_write_pool,
- driver->write_len_mdm,
+ diag_bridge[HSIC].write_len,
driver->num_hsic_buf_tbl_entries,
- driver->usb_mdm_connected,
- work_pending(&(driver->diag_read_mdm_work)),
+ diag_bridge[HSIC].usb_connected,
+ work_pending(&(diag_bridge[HSIC].diag_read_work)),
work_pending(&(driver->diag_read_hsic_work)),
work_pending(&(driver->diag_disconnect_work)),
- work_pending(&(driver->diag_usb_read_complete_work)));
+ work_pending(&(diag_bridge[HSIC].usb_read_complete_work)),
+ driver->lcid,
+ driver->diag_smux_enabled,
+ driver->in_busy_smux,
+ driver->smux_connected);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
@@ -252,8 +261,8 @@
return ret;
}
-const struct file_operations diag_dbgfs_hsic_ops = {
- .read = diag_dbgfs_read_hsic,
+const struct file_operations diag_dbgfs_bridge_ops = {
+ .read = diag_dbgfs_read_bridge,
};
#endif
@@ -284,9 +293,9 @@
debugfs_create_file("work_pending", 0444, diag_dbgfs_dent, 0,
&diag_dbgfs_workpending_ops);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
- debugfs_create_file("hsic", 0444, diag_dbgfs_dent, 0,
- &diag_dbgfs_hsic_ops);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_bridge_ops);
#endif
diag_dbgfs_table_index = 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index de3cf52..d1ec5f2 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -273,7 +273,9 @@
struct diag_request *usb_read_mdm_ptr;
struct diag_request *write_ptr_mdm;
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ /* common for all bridges */
+ struct work_struct diag_disconnect_work;
/* SGLTE variables */
int lcid;
unsigned char *buf_in_smux;
@@ -290,18 +292,6 @@
int in_busy_hsic_read_on_device;
int in_busy_hsic_write;
struct work_struct diag_read_hsic_work;
- struct mutex bridge_mutex;
- /* USB MDM channel variables */
- int usb_mdm_connected;
- int read_len_mdm;
- int write_len_mdm;
- unsigned char *usb_buf_mdm_out;
- struct usb_diag_ch *mdm_ch;
- struct workqueue_struct *diag_bridge_wq;
- struct work_struct diag_read_mdm_work;
- struct work_struct diag_disconnect_work;
- struct work_struct diag_usb_read_complete_work;
- struct diag_request *usb_read_mdm_ptr;
int count_hsic_pool;
int count_hsic_write_pool;
unsigned int poolsize_hsic;
@@ -316,5 +306,6 @@
#endif
};
+extern struct diag_bridge_dev *diag_bridge;
extern struct diagchar_dev *driver;
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 7b17ce4..645d916 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -32,13 +32,14 @@
#ifdef CONFIG_DIAG_SDIO_PIPE
#include "diagfwd_sdio.h"
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
#include "diagfwd_hsic.h"
#include "diagfwd_smux.h"
#endif
#include <linux/timer.h>
#include "diag_debugfs.h"
#include "diag_masks.h"
+#include "diagfwd_bridge.h"
MODULE_DESCRIPTION("Diag Char Driver");
MODULE_LICENSE("GPL v2");
@@ -127,7 +128,7 @@
mutex_unlock(&driver->diagchar_mutex);
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diag_clear_hsic_tbl(void)
{
int i;
@@ -278,7 +279,7 @@
if (driver->logging_process_id == current->tgid) {
driver->logging_mode = USB_MODE;
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diag_clear_hsic_tbl();
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
@@ -708,7 +709,7 @@
#ifdef CONFIG_DIAG_SDIO_PIPE
driver->in_busy_sdio = 1;
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_disconnect_bridge(0);
diag_clear_hsic_tbl();
#endif
@@ -737,7 +738,7 @@
queue_work(driver->diag_sdio_wq,
&(driver->diag_read_sdio_work));
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_connect_bridge(0);
#endif
}
@@ -745,13 +746,13 @@
else if (temp == USB_MODE && driver->logging_mode
== NO_LOGGING_MODE) {
diagfwd_disconnect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_disconnect_bridge(0);
#endif
} else if (temp == NO_LOGGING_MODE && driver->logging_mode
== USB_MODE) {
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_connect_bridge(0);
#endif
} else if (temp == USB_MODE && driver->logging_mode
@@ -781,14 +782,14 @@
queue_work(driver->diag_sdio_wq,
&(driver->diag_read_sdio_work));
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
#endif
} else if (temp == MEMORY_DEVICE_MODE &&
driver->logging_mode == USB_MODE) {
diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diag_clear_hsic_tbl();
diagfwd_cancel_hsic();
diagfwd_connect_bridge(0);
@@ -814,7 +815,7 @@
struct diag_dci_client_tbl *entry;
int index = -1, i = 0, ret = 0;
int num_data = 0, data_type;
-#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAG_BRIDGE_CODE)
+#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAGFWD_BRIDGE_CODE)
int mdm_token = MDM_TOKEN;
#endif
@@ -833,7 +834,7 @@
if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) && (driver->
logging_mode == MEMORY_DEVICE_MODE)) {
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
unsigned long spin_lock_flags;
struct diag_write_device hsic_buf_tbl[NUM_HSIC_BUF_TBL_ENTRIES];
#endif
@@ -969,7 +970,7 @@
driver->in_busy_sdio = 0;
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
spin_lock_irqsave(&driver->hsic_spinlock, spin_lock_flags);
for (i = 0; i < driver->poolsize_hsic_write; i++) {
hsic_buf_tbl[i].buf = driver->hsic_buf_tbl[i].buf;
@@ -1120,14 +1121,17 @@
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
/* check the current client and copy its data */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- entry = &(driver->dci_client_tbl[i]);
- if (entry && (current->tgid == entry->client->tgid)) {
- COPY_USER_SPACE_OR_EXIT(buf+4,
- entry->data_len, 4);
- COPY_USER_SPACE_OR_EXIT(buf+8,
- *(entry->dci_data), entry->data_len);
- entry->data_len = 0;
- break;
+ if (driver->dci_client_tbl[i].client != NULL) {
+ entry = &(driver->dci_client_tbl[i]);
+ if (entry && (current->tgid ==
+ entry->client->tgid)) {
+ COPY_USER_SPACE_OR_EXIT(buf+4,
+ entry->data_len, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+8,
+ *(entry->dci_data), entry->data_len);
+ entry->data_len = 0;
+ break;
+ }
}
}
driver->data_ready[index] ^= DCI_DATA_TYPE;
@@ -1199,7 +1203,8 @@
/* Check masks for On-Device logging */
if (driver->mask_check) {
- if (!mask_request_validate(driver->user_space_data)) {
+ if (!mask_request_validate(driver->user_space_data +
+ token_offset)) {
pr_alert("diag: mask request Invalid\n");
return -EFAULT;
}
@@ -1224,7 +1229,7 @@
}
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/* send masks to 9k too */
if (driver->hsic_ch && (payload_size > 0) && remote_data) {
/* wait sending mask updates if HSIC ch not ready */
@@ -1530,6 +1535,13 @@
return 0;
}
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_disconnect_work_fn(struct work_struct *w)
+{
+ diagfwd_disconnect_bridge(1);
+}
+#endif
+
#ifdef CONFIG_DIAG_SDIO_PIPE
void diag_sdio_fn(int type)
{
@@ -1544,16 +1556,14 @@
inline void diag_sdio_fn(int type) {}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-void diag_bridge_fn(int type)
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+void diagfwd_bridge_fn(int type)
{
- if (type == INIT)
- diagfwd_bridge_init();
- else if (type == EXIT)
+ if (type == EXIT)
diagfwd_bridge_exit();
}
#else
-inline void diag_bridge_fn(int type) {}
+inline void diagfwd_bridge_fn(int type) { }
#endif
static int __init diagchar_init(void)
@@ -1563,6 +1573,12 @@
pr_debug("diagfwd initializing ..\n");
driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ diag_bridge = kzalloc(MAX_BRIDGES * sizeof(struct diag_bridge_dev),
+ GFP_KERNEL);
+ if (!diag_bridge)
+ pr_warning("diag: could not allocate memory for bridge\n");
+#endif
if (driver) {
driver->used = 0;
@@ -1607,10 +1623,16 @@
diag_debugfs_init();
diag_masks_init();
diagfwd_init();
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ diagfwd_bridge_init(HSIC);
+ diagfwd_bridge_init(SMUX);
+ INIT_WORK(&(driver->diag_disconnect_work),
+ diag_disconnect_work_fn);
+#endif
diagfwd_cntl_init();
driver->dci_state = diag_dci_init();
diag_sdio_fn(INIT);
- diag_bridge_fn(INIT);
+
pr_debug("diagchar initializing ..\n");
driver->num = 1;
driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
@@ -1645,7 +1667,7 @@
diagfwd_cntl_exit();
diag_masks_exit();
diag_sdio_fn(EXIT);
- diag_bridge_fn(EXIT);
+ diagfwd_bridge_fn(EXIT);
return -1;
}
@@ -1659,7 +1681,7 @@
diagfwd_cntl_exit();
diag_masks_exit();
diag_sdio_fn(EXIT);
- diag_bridge_fn(EXIT);
+ diagfwd_bridge_fn(EXIT);
diag_debugfs_cleanup();
diagchar_cleanup();
printk(KERN_INFO "done diagchar exit\n");
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 978b63b..cee4c96 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -40,6 +40,7 @@
#endif
#include "diag_dci.h"
#include "diag_masks.h"
+#include "diagfwd_bridge.h"
#define MODE_CMD 41
#define RESET_ID 2
@@ -327,7 +328,7 @@
}
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
unsigned long flags;
int foundIndex = -1;
@@ -337,7 +338,7 @@
if (driver->hsic_buf_tbl[i].length == 0) {
driver->hsic_buf_tbl[i].buf = buf;
driver->hsic_buf_tbl[i].length =
- driver->write_len_mdm;
+ diag_bridge[HSIC].write_len;
driver->num_hsic_buf_tbl_entries++;
foundIndex = i;
break;
@@ -349,7 +350,7 @@
else
pr_debug("diag: ENQUEUE HSIC buf ptr and length is %x , %d\n",
(unsigned int)buf,
- driver->write_len_mdm);
+ diag_bridge[HSIC].write_len);
}
#endif
for (i = 0; i < driver->num_clients; i++)
@@ -386,10 +387,10 @@
&(driver->diag_read_sdio_work));
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
if (driver->hsic_ch)
- queue_work(driver->diag_bridge_wq,
+ queue_work(diag_bridge[HSIC].wq,
&(driver->diag_read_hsic_work));
}
#endif
@@ -436,7 +437,7 @@
"while USB write\n");
}
#endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
else if (proc_num == HSIC_DATA) {
if (driver->hsic_device_enabled) {
struct diag_request *write_ptr_mdm;
@@ -447,9 +448,10 @@
if (write_ptr_mdm) {
write_ptr_mdm->buf = buf;
write_ptr_mdm->length =
- driver->write_len_mdm;
- err = usb_diag_write(driver->mdm_ch,
- write_ptr_mdm);
+ diag_bridge[HSIC].write_len;
+ write_ptr_mdm->context = (void *)HSIC;
+ err = usb_diag_write(
+ diag_bridge[HSIC].ch, write_ptr_mdm);
/* Return to the pool immediately */
if (err) {
diagmem_free(driver,
@@ -463,14 +465,16 @@
err = -1;
}
} else {
- pr_err("diag: Incorrect hsic data "
+ pr_err("diag: Incorrect HSIC data "
"while USB write\n");
err = -1;
}
} else if (proc_num == SMUX_DATA) {
write_ptr->buf = buf;
+ write_ptr->context = (void *)SMUX;
pr_debug("diag: writing SMUX data\n");
- err = usb_diag_write(driver->mdm_ch, write_ptr);
+ err = usb_diag_write(diag_bridge[SMUX].ch,
+ write_ptr);
}
#endif
APPEND_DEBUG('d');
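
With HSIC and SMUX now sharing one USB notifier, the writes above stamp each diag_request with its bridge index through the context pointer, and the completion path recovers the index with a cast to dispatch to the right bridge. A minimal user-space sketch of that tag-and-dispatch round trip (the struct is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum { HSIC, SMUX, MAX_BRIDGES };

    struct request {
        void *context; /* carries the bridge index through the USB layer */
    };

    static void submit(struct request *req, int index)
    {
        req->context = (void *)(intptr_t)index; /* tag on the way out */
    }

    static void on_complete(struct request *req)
    {
        int index = (int)(intptr_t)req->context; /* recover on the way back */

        printf("completion for bridge %s\n",
               index == HSIC ? "HSIC" : "SMUX");
    }

    int main(void)
    {
        struct request req;

        submit(&req, SMUX);
        on_complete(&req);
        return 0;
    }
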
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 0000000..75fdeb4
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,355 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diagmem.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+
+struct diag_bridge_dev *diag_bridge;
+
+/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
+int diagfwd_connect_bridge(int process_cable)
+{
+ int i;
+
+ pr_debug("diag: in %s\n", __func__);
+
+ for (i = 0; i < MAX_BRIDGES; i++)
+ if (diag_bridge[i].enabled)
+ connect_bridge(process_cable, i);
+ return 0;
+}
+
+void connect_bridge(int process_cable, int index)
+{
+ int err;
+
+ mutex_lock(&diag_bridge[index].bridge_mutex);
+ /* If the usb cable is being connected */
+ if (process_cable) {
+ err = usb_diag_alloc_req(diag_bridge[index].ch, N_MDM_WRITE,
+ N_MDM_READ);
+ if (err)
+ pr_err("diag: unable to alloc USB req on mdm ch err:%d\n",
+ err);
+
+ diag_bridge[index].usb_connected = 1;
+ }
+
+ if (index == SMUX && driver->diag_smux_enabled) {
+ driver->in_busy_smux = 0;
+ diagfwd_connect_smux();
+ } else if (index == HSIC && driver->hsic_device_enabled) {
+ driver->in_busy_hsic_read_on_device = 0;
+ driver->in_busy_hsic_write = 0;
+ /* If the HSIC (diag_bridge) platform device is not open */
+ if (!driver->hsic_device_opened) {
+ err = diag_bridge_open(&hsic_diag_bridge_ops);
+ if (err) {
+ pr_err("diag: HSIC channel open error: %d\n",
+ err);
+ } else {
+ pr_debug("diag: opened HSIC channel\n");
+ driver->hsic_device_opened = 1;
+ }
+ } else {
+ pr_debug("diag: HSIC channel already open\n");
+ }
+ /*
+ * Turn on communication over usb mdm and HSIC, if the HSIC
+ * device driver is enabled and opened
+ */
+ if (driver->hsic_device_opened) {
+ driver->hsic_ch = 1;
+ /* Poll USB mdm channel to check for data */
+ if (driver->logging_mode == USB_MODE)
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
+ /* Poll HSIC channel to check for data */
+ queue_work(diag_bridge[HSIC].wq,
+ &driver->diag_read_hsic_work);
+ }
+ }
+ mutex_unlock(&diag_bridge[index].bridge_mutex);
+}
+
+/*
+ * diagfwd_disconnect_bridge is called when the USB mdm channel
+ * is disconnected. So disconnect should happen for all bridges
+ */
+int diagfwd_disconnect_bridge(int process_cable)
+{
+ int i;
+ pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
+
+ for (i = 0; i < MAX_BRIDGES; i++) {
+ if (diag_bridge[i].enabled) {
+ mutex_lock(&diag_bridge[i].bridge_mutex);
+ /* If the usb cable is being disconnected */
+ if (process_cable) {
+ diag_bridge[i].usb_connected = 0;
+ usb_diag_free_req(diag_bridge[i].ch);
+ }
+
+ if (i == HSIC && driver->hsic_device_enabled &&
+ driver->logging_mode != MEMORY_DEVICE_MODE) {
+ driver->in_busy_hsic_read_on_device = 1;
+ driver->in_busy_hsic_write = 1;
+ /* Turn off communication over usb and HSIC */
+ diag_hsic_close();
+ } else if (i == SMUX && driver->diag_smux_enabled &&
+ driver->logging_mode == USB_MODE) {
+ driver->in_busy_smux = 1;
+ driver->lcid = LCID_INVALID;
+ driver->smux_connected = 0;
+ /* Turn off communication over usb and smux */
+ msm_smux_close(LCID_VALID);
+ }
+ mutex_unlock(&diag_bridge[i].bridge_mutex);
+ }
+ }
+ return 0;
+}
+
+/* Called after the asynchronous usb_diag_read() on mdm channel is complete */
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+{
+ int index = (int)(diag_read_ptr->context);
+
+ /* The read of the usb on the mdm (not HSIC/SMUX) has completed */
+ diag_bridge[index].read_len = diag_read_ptr->actual;
+
+ if (index == SMUX) {
+ if (driver->diag_smux_enabled) {
+ diagfwd_read_complete_smux();
+ return 0;
+ } else {
+ pr_warning("diag: incorrect callback for smux\n");
+ }
+ }
+
+ /* If SMUX not enabled, check for HSIC */
+ driver->in_busy_hsic_read_on_device = 0;
+ if (!driver->hsic_ch) {
+ pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
+ return 0;
+ }
+
+ /*
+ * The read of the usb driver on the mdm channel has completed.
+ * If there is no write on the HSIC in progress, check if the
+ * read has data to pass on to the HSIC. If so, pass the usb
+ * mdm data on to the HSIC.
+ */
+ if (!driver->in_busy_hsic_write && diag_bridge[HSIC].usb_buf_out &&
+ (diag_bridge[HSIC].read_len > 0)) {
+
+ /*
+ * Initiate the HSIC write. The HSIC write is
+ * asynchronous. When complete the write
+ * complete callback function will be called
+ */
+ int err;
+ driver->in_busy_hsic_write = 1;
+ err = diag_bridge_write(diag_bridge[HSIC].usb_buf_out,
+ diag_bridge[HSIC].read_len);
+ if (err) {
+ pr_err_ratelimited("diag: mdm data on HSIC write err: %d\n",
+ err);
+ /*
+ * If the error is recoverable, then clear
+ * the write flag, so we will resubmit a
+ * write on the next frame. Otherwise, don't
+ * resubmit a write on the next frame.
+ */
+ if ((-ENODEV) != err)
+ driver->in_busy_hsic_write = 0;
+ }
+ }
+
+ /*
+ * If there is no write of the usb mdm data on the
+ * HSIC channel
+ */
+ if (!driver->in_busy_hsic_write)
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
+
+ return 0;
+}
+
+static void diagfwd_bridge_notifier(void *priv, unsigned event,
+ struct diag_request *d_req)
+{
+ int index;
+
+ switch (event) {
+ case USB_DIAG_CONNECT:
+ diagfwd_connect_bridge(1);
+ break;
+ case USB_DIAG_DISCONNECT:
+ queue_work(driver->diag_wq,
+ &driver->diag_disconnect_work);
+ break;
+ case USB_DIAG_READ_DONE:
+ index = (int)(d_req->context);
+ queue_work(diag_bridge[index].wq,
+ &diag_bridge[index].usb_read_complete_work);
+ break;
+ case USB_DIAG_WRITE_DONE:
+ index = (int)(d_req->context);
+ if (index == HSIC && driver->hsic_device_enabled)
+ diagfwd_write_complete_hsic(d_req);
+ else if (index == SMUX && driver->diag_smux_enabled)
+ diagfwd_write_complete_smux();
+ break;
+ default:
+ pr_err("diag: in %s: Unknown event from USB diag:%u\n",
+ __func__, event);
+ break;
+ }
+}
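
/*
 * The notifier relies on the bridge index being stashed in the
 * request's context field at submit time; both halves of the round
 * trip appear elsewhere in this patch:
 */
	/* submit side: tag the read request with its bridge index */
	diag_bridge[HSIC].usb_read_ptr->context = (void *)HSIC;
	usb_diag_read(diag_bridge[HSIC].ch, diag_bridge[HSIC].usb_read_ptr);

	/* completion side: recover the index and fan out per bridge */
	index = (int)(d_req->context);
	queue_work(diag_bridge[index].wq,
		   &diag_bridge[index].usb_read_complete_work);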
+
+void diagfwd_bridge_init(int index)
+{
+ int ret;
+ char name[20];
+
+ if (index == HSIC)
+ strlcpy(name, "hsic", sizeof(name));
+ else
+ strlcpy(name, "smux", sizeof(name));
+
+ strlcpy(diag_bridge[index].name, name, sizeof(diag_bridge[index].name));
+ strlcat(name, "_diag_wq", sizeof(name));
+ diag_bridge[index].enabled = 1;
+ diag_bridge[index].wq = create_singlethread_workqueue(name);
+ diag_bridge[index].read_len = 0;
+ diag_bridge[index].write_len = 0;
+ if (diag_bridge[index].usb_buf_out == NULL)
+ diag_bridge[index].usb_buf_out =
+ kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+ if (diag_bridge[index].usb_buf_out == NULL)
+ goto err;
+ if (diag_bridge[index].usb_read_ptr == NULL)
+ diag_bridge[index].usb_read_ptr =
+ kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+ if (diag_bridge[index].usb_read_ptr == NULL)
+ goto err;
+ if (diag_bridge[index].usb_read_ptr->context == NULL)
+ diag_bridge[index].usb_read_ptr->context =
+ kzalloc(sizeof(int), GFP_KERNEL);
+ if (diag_bridge[index].usb_read_ptr->context == NULL)
+ goto err;
+ mutex_init(&diag_bridge[index].bridge_mutex);
+
+ if (index == HSIC) {
+ INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+ diag_usb_read_complete_hsic_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+ INIT_WORK(&(diag_bridge[index].diag_read_work),
+ diag_read_usb_hsic_work_fn);
+ diag_bridge[index].ch = usb_diag_open(DIAG_MDM, (void *)index,
+ diagfwd_bridge_notifier);
+ if (IS_ERR(diag_bridge[index].ch)) {
+ pr_err("diag: Unable to open USB diag MDM channel\n");
+ goto err;
+ }
+#endif
+ /* register HSIC device */
+ ret = platform_driver_register(&msm_hsic_ch_driver);
+ if (ret)
+ pr_err("diag: could not register HSIC device, ret: %d\n",
+ ret);
+ } else if (index == SMUX) {
+ INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+ diag_usb_read_complete_smux_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+ INIT_WORK(&(diag_bridge[index].diag_read_work),
+ diag_read_usb_smux_work_fn);
+ diag_bridge[index].ch = usb_diag_open(DIAG_QSC, (void *)index,
+ diagfwd_bridge_notifier);
+ if (IS_ERR(diag_bridge[index].ch)) {
+ pr_err("diag: Unable to open USB diag QSC channel\n");
+ goto err;
+ }
+#endif
+ ret = platform_driver_register(&msm_diagfwd_smux_driver);
+ if (ret)
+ pr_err("diag: could not register SMUX device, ret: %d\n",
+ ret);
+ }
+ return;
+err:
+ pr_err("diag: Could not initialize for bridge forwarding\n");
+ kfree(diag_bridge[index].usb_buf_out);
+ kfree(driver->hsic_buf_tbl);
+ kfree(driver->write_ptr_mdm);
+ kfree(diag_bridge[index].usb_read_ptr);
+ if (diag_bridge[index].wq)
+ destroy_workqueue(diag_bridge[index].wq);
+ return;
+}
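
/*
 * Callers are expected to initialize each bridge individually; a
 * minimal usage sketch (the hsic_present/smux_present guards are
 * hypothetical, not part of this patch):
 */
	if (hsic_present)
		diagfwd_bridge_init(HSIC);
	if (smux_present)
		diagfwd_bridge_init(SMUX);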
+
+void diagfwd_bridge_exit(void)
+{
+ int i;
+ pr_debug("diag: in %s\n", __func__);
+
+ if (driver->hsic_device_enabled) {
+ diag_hsic_close();
+ driver->hsic_device_enabled = 0;
+ diag_bridge[HSIC].enabled = 0;
+ }
+ driver->hsic_inited = 0;
+ diagmem_exit(driver, POOL_TYPE_ALL);
+ if (driver->diag_smux_enabled) {
+ driver->lcid = LCID_INVALID;
+ kfree(driver->buf_in_smux);
+ driver->diag_smux_enabled = 0;
+ diag_bridge[SMUX].enabled = 0;
+ }
+ platform_driver_unregister(&msm_hsic_ch_driver);
+ platform_driver_unregister(&msm_diagfwd_smux_driver);
+ /* destroy USB MDM specific variables */
+ for (i = 0; i < MAX_BRIDGES; i++) {
+ if (diag_bridge[i].enabled) {
+#ifdef CONFIG_DIAG_OVER_USB
+ if (diag_bridge[i].usb_connected)
+ usb_diag_free_req(diag_bridge[i].ch);
+ usb_diag_close(diag_bridge[i].ch);
+#endif
+ kfree(diag_bridge[i].usb_buf_out);
+ kfree(diag_bridge[i].usb_read_ptr);
+ destroy_workqueue(diag_bridge[i].wq);
+ diag_bridge[i].enabled = 0;
+ }
+ }
+ kfree(driver->hsic_buf_tbl);
+ kfree(driver->write_ptr_mdm);
+}
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 0000000..06e6a96
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+#include "diagfwd.h"
+
+#define MAX_BRIDGES 5
+#define HSIC 0
+#define SMUX 1
+
+int diagfwd_connect_bridge(int);
+void connect_bridge(int, int);
+int diagfwd_disconnect_bridge(int);
+void diagfwd_bridge_init(int index);
+void diagfwd_bridge_exit(void);
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr);
+
+/*
+ * Diag bridge device structure; up to MAX_BRIDGES bridges can be
+ * active simultaneously, e.g. SMUX and HSIC working in parallel.
+ */
+struct diag_bridge_dev {
+ char name[20];
+ int enabled;
+ struct mutex bridge_mutex;
+ int usb_connected;
+ int read_len;
+ int write_len;
+ unsigned char *usb_buf_out;
+ struct usb_diag_ch *ch;
+ struct workqueue_struct *wq;
+ struct work_struct diag_read_work;
+ struct diag_request *usb_read_ptr;
+ struct work_struct usb_read_complete_work;
+};
+
+#endif
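
/*
 * The diag_bridge array itself is not defined in this hunk; a
 * declaration consistent with the indexed usage throughout the patch
 * would be (assumption, not taken from the source):
 */
extern struct diag_bridge_dev *diag_bridge;	/* MAX_BRIDGES entries */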
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index 7aef01f..3d5eea5 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -31,6 +31,7 @@
#include "diagfwd.h"
#include "diagfwd_hsic.h"
#include "diagfwd_smux.h"
+#include "diagfwd_bridge.h"
#define READ_HSIC_BUF_SIZE 2048
@@ -72,7 +73,7 @@
write_ptrs_available--;
/*
- * No sense queuing a read if the hsic bridge was
+ * No sense queuing a read if the HSIC bridge was
* closed in another thread
*/
if (!driver->hsic_ch)
@@ -82,7 +83,7 @@
POOL_TYPE_HSIC);
if (buf_in_hsic) {
/*
- * Initiate the read from the hsic. The hsic read is
+ * Initiate the read from the HSIC. The HSIC read is
* asynchronous. Once the read is complete the read
* callback function will be called.
*/
@@ -116,7 +117,7 @@
if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
(num_reads_submitted == 0) && (err != -ENODEV) &&
(driver->hsic_ch != 0))
- queue_work(driver->diag_bridge_wq,
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
@@ -127,7 +128,7 @@
if (!driver->hsic_ch) {
/*
- * The hsic channel is closed. Return the buffer to
+ * The HSIC channel is closed. Return the buffer to
* the pool. Do not send it on.
*/
diagmem_free(driver, buf, POOL_TYPE_HSIC);
@@ -149,7 +150,7 @@
* Send data in buf to be written on the
* appropriate device, e.g. USB MDM channel
*/
- driver->write_len_mdm = actual_size;
+ diag_bridge[HSIC].write_len = actual_size;
err = diag_device_write((void *)buf, HSIC_DATA, NULL);
/* If an error, return buffer to the pool */
if (err) {
@@ -170,13 +171,13 @@
}
/*
- * If for some reason there was no hsic data to write to the
+ * If for some reason there was no HSIC data to write to the
* mdm channel, set up another read
*/
if (err &&
((driver->logging_mode == MEMORY_DEVICE_MODE) ||
- (driver->usb_mdm_connected && !driver->hsic_suspend))) {
- queue_work(driver->diag_bridge_wq,
+ (diag_bridge[HSIC].usb_connected && !driver->hsic_suspend))) {
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
}
@@ -195,8 +196,10 @@
if (actual_size < 0)
pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size);
- if (driver->usb_mdm_connected && (driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
+ if (diag_bridge[HSIC].usb_connected &&
+ (driver->logging_mode == USB_MODE))
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
}
static int diag_hsic_suspend(void *ctxt)
@@ -223,12 +226,12 @@
if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
((driver->logging_mode == MEMORY_DEVICE_MODE) ||
- (driver->usb_mdm_connected)))
- queue_work(driver->diag_bridge_wq,
+ (diag_bridge[HSIC].usb_connected)))
+ queue_work(diag_bridge[HSIC].wq,
&driver->diag_read_hsic_work);
}
-static struct diag_bridge_ops hsic_diag_bridge_ops = {
+struct diag_bridge_ops hsic_diag_bridge_ops = {
.ctxt = NULL,
.read_complete_cb = diag_hsic_read_complete_callback,
.write_complete_cb = diag_hsic_write_complete_callback,
@@ -236,7 +239,7 @@
.resume = diag_hsic_resume,
};
-static void diag_hsic_close(void)
+void diag_hsic_close(void)
{
if (driver->hsic_device_enabled) {
driver->hsic_ch = 0;
@@ -257,7 +260,7 @@
{
int err;
- mutex_lock(&driver->bridge_mutex);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
if (driver->hsic_device_enabled) {
if (driver->hsic_device_opened) {
driver->hsic_ch = 0;
@@ -274,112 +277,7 @@
}
}
}
-
- mutex_unlock(&driver->bridge_mutex);
- return 0;
-}
-
-/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
-int diagfwd_connect_bridge(int process_cable)
-{
- int err;
-
- pr_debug("diag: in %s\n", __func__);
-
- mutex_lock(&driver->bridge_mutex);
- /* If the usb cable is being connected */
- if (process_cable) {
- err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE,
- N_MDM_READ);
- if (err)
- pr_err("diag: unable to alloc USB req on mdm"
- " ch err:%d\n", err);
-
- driver->usb_mdm_connected = 1;
- }
-
- if (driver->hsic_device_enabled) {
- driver->in_busy_hsic_read_on_device = 0;
- driver->in_busy_hsic_write = 0;
- } else if (driver->diag_smux_enabled) {
- driver->in_busy_smux = 0;
- diagfwd_connect_smux();
- mutex_unlock(&driver->bridge_mutex);
- return 0;
- }
-
- /* If the hsic (diag_bridge) platform device is not open */
- if (driver->hsic_device_enabled) {
- if (!driver->hsic_device_opened) {
- err = diag_bridge_open(&hsic_diag_bridge_ops);
- if (err) {
- pr_err("diag: HSIC channel open error: %d\n",
- err);
- } else {
- pr_debug("diag: opened HSIC channel\n");
- driver->hsic_device_opened = 1;
- }
- } else {
- pr_debug("diag: HSIC channel already open\n");
- }
-
- /*
- * Turn on communication over usb mdm and hsic, if the hsic
- * device driver is enabled and opened
- */
- if (driver->hsic_device_opened) {
- driver->hsic_ch = 1;
-
- /* Poll USB mdm channel to check for data */
- if (driver->logging_mode == USB_MODE)
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_mdm_work);
-
- /* Poll HSIC channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_hsic_work);
- }
- } else {
- /* The hsic device driver has not yet been enabled */
- pr_info("diag: HSIC channel not yet enabled\n");
- }
-
- mutex_unlock(&driver->bridge_mutex);
- return 0;
-}
-
-/*
- * diagfwd_disconnect_bridge is called when the USB mdm channel
- * is disconnected
- */
-int diagfwd_disconnect_bridge(int process_cable)
-{
- pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
-
- mutex_lock(&driver->bridge_mutex);
-
- /* If the usb cable is being disconnected */
- if (process_cable) {
- driver->usb_mdm_connected = 0;
- usb_diag_free_req(driver->mdm_ch);
- }
-
- if (driver->hsic_device_enabled &&
- driver->logging_mode != MEMORY_DEVICE_MODE) {
- driver->in_busy_hsic_read_on_device = 1;
- driver->in_busy_hsic_write = 1;
- /* Turn off communication over usb mdm and hsic */
- diag_hsic_close();
- } else if (driver->diag_smux_enabled &&
- driver->logging_mode == USB_MODE) {
- driver->in_busy_smux = 1;
- driver->lcid = LCID_INVALID;
- driver->smux_connected = 0;
- /* Turn off communication over usb mdm and smux */
- msm_smux_close(LCID_VALID);
- }
-
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return 0;
}
@@ -403,225 +301,128 @@
return 0;
}
- /* Read data from the hsic */
- queue_work(driver->diag_bridge_wq, &driver->diag_read_hsic_work);
+ /* Read data from the HSIC */
+ queue_work(diag_bridge[HSIC].wq, &driver->diag_read_hsic_work);
return 0;
}
-/* Called after the asychronous usb_diag_read() on mdm channel is complete */
-static int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+void diag_usb_read_complete_hsic_fn(struct work_struct *w)
{
- /* The read of the usb driver on the mdm (not hsic) has completed */
- driver->in_busy_hsic_read_on_device = 0;
- driver->read_len_mdm = diag_read_ptr->actual;
+ diagfwd_read_complete_bridge(diag_bridge[HSIC].usb_read_ptr);
+}
- if (driver->diag_smux_enabled) {
- diagfwd_read_complete_smux();
- return 0;
- }
- /* If SMUX not enabled, check for HSIC */
+
+void diag_read_usb_hsic_work_fn(struct work_struct *work)
+{
if (!driver->hsic_ch) {
- pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
- return 0;
- }
-
- /*
- * The read of the usb driver on the mdm channel has completed.
- * If there is no write on the hsic in progress, check if the
- * read has data to pass on to the hsic. If so, pass the usb
- * mdm data on to the hsic.
- */
- if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out &&
- (driver->read_len_mdm > 0)) {
-
- /*
- * Initiate the hsic write. The hsic write is
- * asynchronous. When complete the write
- * complete callback function will be called
- */
- int err;
- driver->in_busy_hsic_write = 1;
- err = diag_bridge_write(driver->usb_buf_mdm_out,
- driver->read_len_mdm);
- if (err) {
- pr_err_ratelimited("diag: mdm data on hsic write err: %d\n",
- err);
- /*
- * If the error is recoverable, then clear
- * the write flag, so we will resubmit a
- * write on the next frame. Otherwise, don't
- * resubmit a write on the next frame.
- */
- if ((-ENODEV) != err)
- driver->in_busy_hsic_write = 0;
- }
- }
-
- /*
- * If there is no write of the usb mdm data on the
- * hsic channel
- */
- if (!driver->in_busy_hsic_write && (driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
-
- return 0;
-}
-
-static void diagfwd_bridge_notifier(void *priv, unsigned event,
- struct diag_request *d_req)
-{
- switch (event) {
- case USB_DIAG_CONNECT:
- diagfwd_connect_bridge(1);
- break;
- case USB_DIAG_DISCONNECT:
- queue_work(driver->diag_bridge_wq,
- &driver->diag_disconnect_work);
- break;
- case USB_DIAG_READ_DONE:
- queue_work(driver->diag_bridge_wq,
- &driver->diag_usb_read_complete_work);
- break;
- case USB_DIAG_WRITE_DONE:
- if (driver->hsic_device_enabled)
- diagfwd_write_complete_hsic(d_req);
- else if (driver->diag_smux_enabled)
- diagfwd_write_complete_smux();
- break;
- default:
- pr_err("diag: in %s: Unknown event from USB diag:%u\n",
- __func__, event);
- break;
- }
-}
-
-static void diag_usb_read_complete_fn(struct work_struct *w)
-{
- diagfwd_read_complete_bridge(driver->usb_read_mdm_ptr);
-}
-
-static void diag_disconnect_work_fn(struct work_struct *w)
-{
- diagfwd_disconnect_bridge(1);
-}
-
-static void diag_read_mdm_work_fn(struct work_struct *work)
-{
- int ret;
- if (driver->diag_smux_enabled) {
- if (driver->lcid && driver->usb_buf_mdm_out &&
- (driver->read_len_mdm > 0) &&
- driver->smux_connected) {
- ret = msm_smux_write(driver->lcid, NULL,
- driver->usb_buf_mdm_out, driver->read_len_mdm);
- if (ret)
- pr_err("diag: writing to SMUX ch, r = %d,"
- "lcid = %d\n", ret, driver->lcid);
- }
- driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
- driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
- usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+ pr_err("diag: in %s: driver->hsic_ch == 0\n", __func__);
return;
}
-
- /* if SMUX not enabled, check for HSIC */
- if (!driver->hsic_ch) {
- pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
- return;
- }
-
/*
* If there is no data being read from the usb mdm channel
* and there is no mdm channel data currently being written
- * to the hsic
+ * to the HSIC
*/
if (!driver->in_busy_hsic_read_on_device &&
- !driver->in_busy_hsic_write) {
+ !driver->in_busy_hsic_write) {
APPEND_DEBUG('x');
-
/* Setup the next read from usb mdm channel */
driver->in_busy_hsic_read_on_device = 1;
- driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
- driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
- usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+ diag_bridge[HSIC].usb_read_ptr->buf =
+ diag_bridge[HSIC].usb_buf_out;
+ diag_bridge[HSIC].usb_read_ptr->length = USB_MAX_OUT_BUF;
+ diag_bridge[HSIC].usb_read_ptr->context = (void *)HSIC;
+ usb_diag_read(diag_bridge[HSIC].ch,
+ diag_bridge[HSIC].usb_read_ptr);
APPEND_DEBUG('y');
}
-
- /*
- * If for some reason there was no mdm channel read initiated,
+ /* If for some reason there was no mdm channel read initiated,
* queue up the reading of data from the mdm channel
*/
+
if (!driver->in_busy_hsic_read_on_device &&
(driver->logging_mode == USB_MODE))
- queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &(diag_bridge[HSIC].diag_read_work));
}
static int diag_hsic_probe(struct platform_device *pdev)
{
int err = 0;
+
pr_debug("diag: in %s\n", __func__);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
if (!driver->hsic_inited) {
+ spin_lock_init(&driver->hsic_spinlock);
+ driver->num_hsic_buf_tbl_entries = 0;
+ if (driver->hsic_buf_tbl == NULL)
+ driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES
+ * sizeof(struct diag_write_device), GFP_KERNEL);
+ if (driver->hsic_buf_tbl == NULL) {
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+ return -ENOMEM;
+ }
+ driver->count_hsic_pool = 0;
+ driver->count_hsic_write_pool = 0;
+ driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
+ driver->poolsize_hsic = N_MDM_WRITE;
+ driver->itemsize_hsic_write = sizeof(struct diag_request);
+ driver->poolsize_hsic_write = N_MDM_WRITE;
diagmem_hsic_init(driver);
INIT_WORK(&(driver->diag_read_hsic_work),
- diag_read_hsic_work_fn);
+ diag_read_hsic_work_fn);
driver->hsic_inited = 1;
}
-
- mutex_lock(&driver->bridge_mutex);
-
/*
* The probe function was called after the usb was connected
* on the legacy channel OR ODL is turned on. Communication over usb
- * mdm and hsic needs to be turned on.
+ * mdm and HSIC needs to be turned on.
*/
- if (driver->usb_mdm_connected || (driver->logging_mode ==
- MEMORY_DEVICE_MODE)) {
+ if (diag_bridge[HSIC].usb_connected || (driver->logging_mode ==
+ MEMORY_DEVICE_MODE)) {
if (driver->hsic_device_opened) {
/* should not happen. close it before re-opening */
pr_warn("diag: HSIC channel already opened in probe\n");
diag_bridge_close();
}
-
err = diag_bridge_open(&hsic_diag_bridge_ops);
if (err) {
pr_err("diag: could not open HSIC, err: %d\n", err);
driver->hsic_device_opened = 0;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return err;
}
pr_info("diag: opened HSIC channel\n");
driver->hsic_device_opened = 1;
driver->hsic_ch = 1;
-
driver->in_busy_hsic_read_on_device = 0;
driver->in_busy_hsic_write = 0;
- if (driver->usb_mdm_connected) {
+ if (diag_bridge[HSIC].usb_connected) {
/* Poll USB mdm channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_mdm_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &diag_bridge[HSIC].diag_read_work);
}
-
/* Poll HSIC channel to check for data */
- queue_work(driver->diag_bridge_wq,
- &driver->diag_read_hsic_work);
+ queue_work(diag_bridge[HSIC].wq,
+ &driver->diag_read_hsic_work);
}
-
- /* The hsic (diag_bridge) platform device driver is enabled */
+ /* The HSIC (diag_bridge) platform device driver is enabled */
driver->hsic_device_enabled = 1;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
return err;
}
static int diag_hsic_remove(struct platform_device *pdev)
{
pr_debug("diag: %s called\n", __func__);
- mutex_lock(&driver->bridge_mutex);
+ mutex_lock(&diag_bridge[HSIC].bridge_mutex);
diag_hsic_close();
driver->hsic_device_enabled = 0;
- mutex_unlock(&driver->bridge_mutex);
+ mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+
return 0;
}
@@ -642,7 +443,7 @@
.runtime_resume = diagfwd_hsic_runtime_resume,
};
-static struct platform_driver msm_hsic_ch_driver = {
+struct platform_driver msm_hsic_ch_driver = {
.probe = diag_hsic_probe,
.remove = diag_hsic_remove,
.driver = {
@@ -651,112 +452,3 @@
.pm = &diagfwd_hsic_dev_pm_ops,
},
};
-
-void diagfwd_bridge_init(void)
-{
- int ret;
-
- pr_debug("diag: in %s\n", __func__);
- driver->diag_bridge_wq = create_singlethread_workqueue(
- "diag_bridge_wq");
- driver->read_len_mdm = 0;
- driver->write_len_mdm = 0;
- driver->num_hsic_buf_tbl_entries = 0;
- spin_lock_init(&driver->hsic_spinlock);
- if (driver->usb_buf_mdm_out == NULL)
- driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF,
- GFP_KERNEL);
- if (driver->usb_buf_mdm_out == NULL)
- goto err;
- /* Only used by smux move to smux probe function */
- if (driver->write_ptr_mdm == NULL)
- driver->write_ptr_mdm = kzalloc(
- sizeof(struct diag_request), GFP_KERNEL);
- if (driver->write_ptr_mdm == NULL)
- goto err;
- if (driver->usb_read_mdm_ptr == NULL)
- driver->usb_read_mdm_ptr = kzalloc(
- sizeof(struct diag_request), GFP_KERNEL);
- if (driver->usb_read_mdm_ptr == NULL)
- goto err;
-
- if (driver->hsic_buf_tbl == NULL)
- driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES *
- sizeof(struct diag_write_device), GFP_KERNEL);
- if (driver->hsic_buf_tbl == NULL)
- goto err;
-
- driver->count_hsic_pool = 0;
- driver->count_hsic_write_pool = 0;
-
- driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
- driver->poolsize_hsic = N_MDM_WRITE;
- driver->itemsize_hsic_write = sizeof(struct diag_request);
- driver->poolsize_hsic_write = N_MDM_WRITE;
-
- mutex_init(&driver->bridge_mutex);
-#ifdef CONFIG_DIAG_OVER_USB
- INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn);
-#endif
- INIT_WORK(&(driver->diag_disconnect_work), diag_disconnect_work_fn);
- INIT_WORK(&(driver->diag_usb_read_complete_work),
- diag_usb_read_complete_fn);
-#ifdef CONFIG_DIAG_OVER_USB
- driver->mdm_ch = usb_diag_open(DIAG_MDM, driver,
- diagfwd_bridge_notifier);
- if (IS_ERR(driver->mdm_ch)) {
- pr_err("diag: Unable to open USB diag MDM channel\n");
- goto err;
- }
-#endif
- /* register HSIC device */
- ret = platform_driver_register(&msm_hsic_ch_driver);
- if (ret)
- pr_err("diag: could not register HSIC device, ret: %d\n", ret);
- /* register SMUX device */
- ret = platform_driver_register(&msm_diagfwd_smux_driver);
- if (ret)
- pr_err("diag: could not register SMUX device, ret: %d\n", ret);
-
- return;
-err:
- pr_err("diag: Could not initialize for bridge forwarding\n");
- kfree(driver->usb_buf_mdm_out);
- kfree(driver->hsic_buf_tbl);
- kfree(driver->write_ptr_mdm);
- kfree(driver->usb_read_mdm_ptr);
- if (driver->diag_bridge_wq)
- destroy_workqueue(driver->diag_bridge_wq);
-
- return;
-}
-
-void diagfwd_bridge_exit(void)
-{
- pr_debug("diag: in %s\n", __func__);
-
- if (driver->hsic_device_enabled) {
- diag_hsic_close();
- driver->hsic_device_enabled = 0;
- }
- driver->hsic_inited = 0;
- diagmem_exit(driver, POOL_TYPE_ALL);
- if (driver->diag_smux_enabled) {
- driver->lcid = LCID_INVALID;
- kfree(driver->buf_in_smux);
- driver->diag_smux_enabled = 0;
- }
- platform_driver_unregister(&msm_hsic_ch_driver);
- platform_driver_unregister(&msm_diagfwd_smux_driver);
- /* destroy USB MDM specific variables */
-#ifdef CONFIG_DIAG_OVER_USB
- if (driver->usb_mdm_connected)
- usb_diag_free_req(driver->mdm_ch);
- usb_diag_close(driver->mdm_ch);
-#endif
- kfree(driver->usb_buf_mdm_out);
- kfree(driver->hsic_buf_tbl);
- kfree(driver->write_ptr_mdm);
- kfree(driver->usb_read_mdm_ptr);
- destroy_workqueue(driver->diag_bridge_wq);
-}
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
index 19ed3c7..2190fff 100644
--- a/drivers/char/diag/diagfwd_hsic.h
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -17,14 +17,14 @@
#define N_MDM_WRITE 8
#define N_MDM_READ 1
-
#define NUM_HSIC_BUF_TBL_ENTRIES N_MDM_WRITE
-int diagfwd_connect_bridge(int);
-int diagfwd_disconnect_bridge(int);
int diagfwd_write_complete_hsic(struct diag_request *);
int diagfwd_cancel_hsic(void);
-void diagfwd_bridge_init(void);
-void diagfwd_bridge_exit(void);
+void diag_read_usb_hsic_work_fn(struct work_struct *work);
+void diag_usb_read_complete_hsic_fn(struct work_struct *w);
+extern struct diag_bridge_ops hsic_diag_bridge_ops;
+extern struct platform_driver msm_hsic_ch_driver;
+void diag_hsic_close(void);
#endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
index ae90686..0a97baf 100644
--- a/drivers/char/diag/diagfwd_smux.c
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -18,6 +18,8 @@
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diagfwd_bridge.h"
void diag_smux_event(void *priv, int event_type, const void *metadata)
{
@@ -30,8 +32,8 @@
driver->smux_connected = 1;
driver->in_busy_smux = 0;
/* read data from USB MDM channel & Initiate first write */
- queue_work(driver->diag_bridge_wq,
- &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq,
+ &diag_bridge[SMUX].diag_read_work);
break;
case SMUX_DISCONNECTED:
driver->smux_connected = 0;
@@ -67,7 +69,7 @@
int diagfwd_read_complete_smux(void)
{
- queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq, &diag_bridge[SMUX].diag_read_work);
return 0;
}
@@ -85,6 +87,36 @@
return 0;
}
+void diag_usb_read_complete_smux_fn(struct work_struct *w)
+{
+ diagfwd_read_complete_bridge(diag_bridge[SMUX].usb_read_ptr);
+}
+
+void diag_read_usb_smux_work_fn(struct work_struct *work)
+{
+ int ret;
+
+ if (driver->diag_smux_enabled) {
+ if (driver->lcid && diag_bridge[SMUX].usb_buf_out &&
+ (diag_bridge[SMUX].read_len > 0) &&
+ driver->smux_connected) {
+ ret = msm_smux_write(driver->lcid, NULL,
+ diag_bridge[SMUX].usb_buf_out,
+ diag_bridge[SMUX].read_len);
+ if (ret)
+ pr_err("diag: writing to SMUX ch, r = %d, lcid = %d\n",
+ ret, driver->lcid);
+ }
+ diag_bridge[SMUX].usb_read_ptr->buf =
+ diag_bridge[SMUX].usb_buf_out;
+ diag_bridge[SMUX].usb_read_ptr->length = USB_MAX_OUT_BUF;
+ diag_bridge[SMUX].usb_read_ptr->context = (void *)SMUX;
+ usb_diag_read(diag_bridge[SMUX].ch,
+ diag_bridge[SMUX].usb_read_ptr);
+ return;
+ }
+}
+
static int diagfwd_smux_runtime_suspend(struct device *dev)
{
dev_dbg(dev, "pm_runtime: suspending...\n");
@@ -120,7 +152,7 @@
}
}
/* Poll USB channel to check for data*/
- queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+ queue_work(diag_bridge[SMUX].wq, &(diag_bridge[SMUX].diag_read_work));
return ret;
}
@@ -142,6 +174,11 @@
* if (ret)
* pr_err("diag: error setting SMUX ch option, r = %d\n", ret);
*/
+ if (driver->write_ptr_mdm == NULL)
+ driver->write_ptr_mdm = kzalloc(sizeof(struct diag_request),
+ GFP_KERNEL);
+ if (driver->write_ptr_mdm == NULL)
+ goto err;
ret = diagfwd_connect_smux();
return ret;
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
index e78b7ed..b45fd5d 100644
--- a/drivers/char/diag/diagfwd_smux.h
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -20,6 +20,8 @@
int diagfwd_read_complete_smux(void);
int diagfwd_write_complete_smux(void);
int diagfwd_connect_smux(void);
+void diag_usb_read_complete_smux_fn(struct work_struct *w);
+void diag_read_usb_smux_work_fn(struct work_struct *work);
extern struct platform_driver msm_diagfwd_smux_driver;
#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 1a522d5..ab1aa75 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -51,7 +51,7 @@
driver->diag_write_struct_pool, GFP_ATOMIC);
}
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC) {
if (driver->diag_hsic_pool) {
if (driver->count_hsic_pool < driver->poolsize_hsic) {
@@ -105,7 +105,7 @@
} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
printk(KERN_ALERT "Unable to destroy STRUCT mempool");
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
if (driver->diag_hsic_pool && (driver->hsic_inited == 0)) {
if (driver->count_hsic_pool == 0) {
mempool_destroy(driver->diag_hdlc_pool);
@@ -156,7 +156,7 @@
pr_err("diag: Attempt to free up DIAG driver "
"USB structure mempool which is already free %d ",
driver->count_write_struct_pool);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC) {
if (driver->diag_hsic_pool != NULL &&
driver->count_hsic_pool > 0) {
@@ -210,7 +210,7 @@
printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diagmem_hsic_init(struct diagchar_dev *driver)
{
if (driver->count_hsic_pool == 0)
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
index 8665c75..36def72f 100644
--- a/drivers/char/diag/diagmem.h
+++ b/drivers/char/diag/diagmem.h
@@ -18,7 +18,7 @@
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
void diagmem_init(struct diagchar_dev *driver);
void diagmem_exit(struct diagchar_dev *driver, int pool_type);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diagmem_hsic_init(struct diagchar_dev *driver);
#endif
#endif
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index f3fe70f..9f96b19 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -234,6 +234,8 @@
uint32_t timestamp_event;
bool pcsave_impl;
bool pcsave_enable;
+ bool pcsave_sticky_enable;
+ bool pcsave_boot_enable;
};
static struct etm_drvdata *etmdrvdata[NR_CPUS];
@@ -1516,7 +1518,7 @@
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
-static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+static int ____etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
{
int ret = 0;
@@ -1524,7 +1526,6 @@
if (ret)
return ret;
- get_online_cpus();
spin_lock(&drvdata->spinlock);
if (val) {
if (drvdata->pcsave_enable)
@@ -1535,6 +1536,7 @@
if (ret)
goto out;
drvdata->pcsave_enable = true;
+ drvdata->pcsave_sticky_enable = true;
dev_info(drvdata->dev, "PC save enabled\n");
} else {
@@ -1551,12 +1553,22 @@
}
out:
spin_unlock(&drvdata->spinlock);
- put_online_cpus();
clk_disable_unprepare(drvdata->clk);
return ret;
}
+static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+{
+ int ret;
+
+ get_online_cpus();
+ ret = ____etm_store_pcsave(drvdata, val);
+ put_online_cpus();
+
+ return ret;
+}
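
/*
 * Who calls which variant (illustrative summary of this patch):
 *   - the sysfs store path uses __etm_store_pcsave(), which wraps the
 *     body in get_online_cpus()/put_online_cpus();
 *   - the CPU_ONLINE hotplug notifier calls ____etm_store_pcsave()
 *     directly, since hotplug is already excluded in that context and
 *     taking get_online_cpus() again could deadlock -- presumably the
 *     reason the raw variant was split out.
 */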
+
static ssize_t etm_store_pcsave(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -1642,6 +1654,13 @@
}
break;
+ case CPU_ONLINE:
+ if (etmdrvdata[cpu] && etmdrvdata[cpu]->pcsave_boot_enable &&
+ !etmdrvdata[cpu]->pcsave_sticky_enable) {
+ ____etm_store_pcsave(etmdrvdata[cpu], 1);
+ }
+ break;
+
case CPU_DYING:
if (etmdrvdata[cpu] && etmdrvdata[cpu]->enable) {
spin_lock(&etmdrvdata[cpu]->spinlock);
@@ -1894,8 +1913,10 @@
if (boot_enable)
coresight_enable(drvdata->csdev);
- if (drvdata->pcsave_impl && boot_pcsave_enable)
- __etm_store_pcsave(drvdata, true);
+ if (drvdata->pcsave_impl && boot_pcsave_enable) {
+ __etm_store_pcsave(drvdata, 1);
+ drvdata->pcsave_boot_enable = true;
+ }
return 0;
err2:
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 0109d26..228f2f5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1776,12 +1776,6 @@
return status;
}
-static inline void adreno_poke(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr);
-}
-
static int adreno_ringbuffer_drain(struct kgsl_device *device,
unsigned int *regs)
{
@@ -1802,12 +1796,8 @@
wait = jiffies + msecs_to_jiffies(100);
- adreno_poke(device);
-
do {
if (time_after(jiffies, wait)) {
- adreno_poke(device);
-
/* Check to see if the core is hung */
if (adreno_hang_detect(device, regs))
return -ETIMEDOUT;
@@ -2167,8 +2157,24 @@
if (!adreno_dev->fast_hang_detect)
return 0;
- if (is_adreno_rbbm_status_idle(device))
+ if (is_adreno_rbbm_status_idle(device)) {
+
+ /*
+ * On A2XX if the RPTR != WPTR and the device is idle, then
+ * the last write to WPTR probably failed to latch so write it
+ * again
+ */
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ unsigned int rptr;
+ adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+ if (rptr != adreno_dev->ringbuffer.wptr)
+ adreno_regwrite(device, REG_CP_RB_WPTR,
+ adreno_dev->ringbuffer.wptr);
+ }
+
return 0;
+ }
for (i = 0; i < hang_detect_regs_count; i++) {
@@ -2275,7 +2281,7 @@
status = 0;
goto done;
}
- adreno_poke(device);
+
io_cnt = (io_cnt + 1) % 100;
if (io_cnt <
pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index a384103..e319813 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -299,81 +299,83 @@
{419, 128000}
};
+/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {-40, 1758},
- {-35, 1742},
- {-30, 1719},
- {-25, 1691},
- {-20, 1654},
- {-15, 1608},
- {-10, 1551},
- {-5, 1483},
- {0, 1404},
- {5, 1315},
- {10, 1218},
- {15, 1114},
- {20, 1007},
- {25, 900},
- {30, 795},
- {35, 696},
- {40, 605},
- {45, 522},
- {50, 448},
- {55, 383},
- {60, 327},
- {65, 278},
- {70, 237},
- {75, 202},
- {80, 172},
- {85, 146},
- {90, 125},
- {95, 107},
- {100, 92},
- {105, 79},
- {110, 68},
- {115, 59},
- {120, 51},
- {125, 44}
+ {1758, -40},
+ {1742, -35},
+ {1719, -30},
+ {1691, -25},
+ {1654, -20},
+ {1608, -15},
+ {1551, -10},
+ {1483, -5},
+ {1404, 0},
+ {1315, 5},
+ {1218, 10},
+ {1114, 15},
+ {1007, 20},
+ {900, 25},
+ {795, 30},
+ {696, 35},
+ {605, 40},
+ {522, 45},
+ {448, 50},
+ {383, 55},
+ {327, 60},
+ {278, 65},
+ {237, 70},
+ {202, 75},
+ {172, 80},
+ {146, 85},
+ {125, 90},
+ {107, 95},
+ {92, 100},
+ {79, 105},
+ {68, 110},
+ {59, 115},
+ {51, 120},
+ {44, 125}
};
+/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
- {-40, 1738},
- {-35, 1714},
- {-30, 1682},
- {-25, 1641},
- {-20, 1589},
- {-15, 1526},
- {-10, 1451},
- {-5, 1363},
- {0, 1266},
- {5, 1159},
- {10, 1048},
- {15, 936},
- {20, 825},
- {25, 720},
- {30, 622},
- {35, 533},
- {40, 454},
- {45, 385},
- {50, 326},
- {55, 275},
- {60, 232},
- {65, 195},
- {70, 165},
- {75, 139},
- {80, 118},
- {85, 100},
- {90, 85},
- {95, 73},
- {100, 62},
- {105, 53},
- {110, 46},
- {115, 40},
- {120, 34},
- {125, 30}
+ {1738, -40},
+ {1714, -35},
+ {1682, -30},
+ {1641, -25},
+ {1589, -20},
+ {1526, -15},
+ {1451, -10},
+ {1363, -5},
+ {1266, 0},
+ {1159, 5},
+ {1048, 10},
+ {936, 15},
+ {825, 20},
+ {720, 25},
+ {622, 30},
+ {533, 35},
+ {454, 40},
+ {385, 45},
+ {326, 50},
+ {275, 55},
+ {232, 60},
+ {195, 65},
+ {165, 70},
+ {139, 75},
+ {118, 80},
+ {100, 85},
+ {85, 90},
+ {73, 95},
+ {62, 100},
+ {53, 105},
+ {46, 110},
+ {40, 115},
+ {34, 120},
+ {30, 125}
};
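
/*
 * Worked example of the voltage-to-temperature lookup after the
 * reordering (illustrative; the real math lives in
 * qpnp_adc_map_voltage_temp()): an input of 1350 falls between
 * {1404, 0} and {1315, 5} in adcmap_100k_104ef_104fb, so linear
 * interpolation gives
 *
 *	temp = 0 + (1350 - 1404) * (5 - 0) / (1315 - 1404)
 *	     = (-54) * 5 / (-89)
 *	     ~= 3 degC
 */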
-static int32_t qpnp_adc_map_linear(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
uint32_t tablesize, int32_t input, int64_t *output)
{
bool descending = 1;
@@ -419,7 +421,7 @@
return 0;
}
-static int32_t qpnp_adc_map_batt_therm(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
uint32_t tablesize, int32_t input, int64_t *output)
{
bool descending = 1;
@@ -552,7 +554,7 @@
xo_thm = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
xo_thm <<= 4;
- qpnp_adc_map_linear(adcmap_ntcg_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_ntcg_104ef_104fb,
ARRAY_SIZE(adcmap_ntcg_104ef_104fb),
xo_thm, &adc_chan_result->physical);
@@ -570,7 +572,7 @@
bat_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- return qpnp_adc_map_batt_therm(
+ return qpnp_adc_map_temp_voltage(
adcmap_btm_threshold,
ARRAY_SIZE(adcmap_btm_threshold),
bat_voltage,
@@ -588,7 +590,7 @@
therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- qpnp_adc_map_linear(adcmap_150k_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
ARRAY_SIZE(adcmap_150k_104ef_104fb),
therm_voltage, &adc_chan_result->physical);
@@ -606,7 +608,7 @@
therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
adc_properties, chan_properties);
- qpnp_adc_map_linear(adcmap_100k_104ef_104fb,
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
ARRAY_SIZE(adcmap_100k_104ef_104fb),
therm_voltage, &adc_chan_result->physical);
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 255920e..a641ce9 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -46,8 +46,8 @@
/* wled control registers */
#define WLED_MOD_CTRL_REG SSBI_REG_ADDR_WLED_CTRL(1)
#define WLED_MAX_CURR_CFG_REG(n) SSBI_REG_ADDR_WLED_CTRL(n + 2)
-#define WLED_BRIGHTNESS_CNTL_REG1(n) SSBI_REG_ADDR_WLED_CTRL(n + 5)
-#define WLED_BRIGHTNESS_CNTL_REG2(n) SSBI_REG_ADDR_WLED_CTRL(n + 6)
+#define WLED_BRIGHTNESS_CNTL_REG1(n) SSBI_REG_ADDR_WLED_CTRL((2 * (n)) + 5)
+#define WLED_BRIGHTNESS_CNTL_REG2(n) SSBI_REG_ADDR_WLED_CTRL((2 * (n)) + 6)
#define WLED_SYNC_REG SSBI_REG_ADDR_WLED_CTRL(11)
#define WLED_OVP_CFG_REG SSBI_REG_ADDR_WLED_CTRL(13)
#define WLED_BOOST_CFG_REG SSBI_REG_ADDR_WLED_CTRL(14)
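
/*
 * Register layout implied by the fix above: each WLED string has a
 * pair of brightness control registers, so the stride is 2.
 *
 *	n = 0:	CNTL_REG1 -> CTRL(5),  CNTL_REG2 -> CTRL(6)
 *	n = 1:	CNTL_REG1 -> CTRL(7),  CNTL_REG2 -> CTRL(8)
 *	n = 2:	CNTL_REG1 -> CTRL(9),  CNTL_REG2 -> CTRL(10)
 *
 * The old (n + 5)/(n + 6) form made adjacent strings overlap.  The
 * companion fix below drops the double offset in the callers:
 * WLED_MAX_CURR_CFG_REG(i + 2) resolved to CTRL(i + 4) instead of
 * the intended CTRL(i + 2).
 */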
@@ -640,7 +640,7 @@
/* program activation delay and maximum current */
for (i = 0; i < num_wled_strings; i++) {
rc = pm8xxx_readb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i + 2), &val);
+ WLED_MAX_CURR_CFG_REG(i), &val);
if (rc) {
dev_err(led->dev->parent, "can't read wled max current"
" config register rc=%d\n", rc);
@@ -665,7 +665,7 @@
val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
rc = pm8xxx_writeb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i + 2), val);
+ WLED_MAX_CURR_CFG_REG(i), val);
if (rc) {
dev_err(led->dev->parent, "can't write wled max current"
" config register rc=%d\n", rc);
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 49543a4..b2a7f71 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -1645,6 +1645,7 @@
static const struct v4l2_ioctl_ops msm_ioctl_ops_server = {
.vidioc_subscribe_event = msm_server_v4l2_subscribe_event,
+ .vidioc_unsubscribe_event = msm_server_v4l2_unsubscribe_event,
.vidioc_default = msm_ioctl_server,
};
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 5e39d25..387c254 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -17,7 +17,7 @@
#include <linux/proc_fs.h>
#include <linux/ioctl.h>
#include <mach/camera.h>
-#include "msm.h"
+#include "../msm.h"
uint32_t msm_cam_server_get_mctl_handle(void);
struct iommu_domain *msm_cam_server_get_domain(void);
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index c4bfaf4..a95a296 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -47,6 +47,27 @@
"Decode Order",
NULL
};
+static const char *const mpeg_video_vidc_extradata[] = {
+ "Extradata none",
+ "Extradata MB Quantization",
+ "Extradata Interlace Video",
+ "Extradata VC1 Framedisp",
+ "Extradata VC1 Seqdisp",
+ "Extradata timestamp",
+ "Extradata S3D Frame Packing",
+ "Extradata Frame Rate",
+ "Extradata Panscan Window",
+ "Extradata Recovery point SEI",
+ "Extradata Closed Caption UD",
+ "Extradata AFD UD",
+ "Extradata Multislice info",
+ "Extradata number of concealed MB",
+ "Extradata metadata filler",
+ "Extradata input crop",
+ "Extradata digital zoom",
+ "Extradata aspect ratio",
+};
+
static const struct msm_vidc_ctrl msm_vdec_ctrls[] = {
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT,
@@ -168,6 +189,36 @@
.menu_skip_mask = 0,
.qmenu = NULL,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+ .name = "Extradata Type",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+ .default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
+ (1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
+ ),
+ .qmenu = mpeg_video_vidc_extradata,
+ .step = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
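
/*
 * Note on menu_skip_mask (V4L2 convention): a *set* bit hides that
 * menu entry.  The mask above therefore clears the bits of the
 * supported extradata types and sets everything else, e.g.
 *
 *	(menu_skip_mask >> V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) & 1 == 0
 *
 * so "Extradata timestamp" stays selectable.
 */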
@@ -1087,6 +1138,15 @@
inst->mode = VIDC_SECURE;
dprintk(VIDC_DBG, "Setting secure mode to :%d\n", inst->mode);
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+ {
+ struct hal_extradata_enable extra;
+ property_id = HAL_PARAM_INDEX_EXTRADATA;
+ extra.index = msm_comm_get_hal_extradata_index(control.value);
+ extra.enable = 1;
+ pdata = &extra;
+ break;
+ }
default:
break;
}
@@ -1111,6 +1171,7 @@
failed_open_done:
return rc;
}
+
static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
return 0;
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index e85389e..cda0ea8 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -2141,3 +2141,61 @@
mutex_unlock(&inst->sync_lock);
return rc;
}
+
+
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+ enum v4l2_mpeg_vidc_extradata index)
+{
+ int ret = 0;
+ switch (index) {
+ case V4L2_MPEG_VIDC_EXTRADATA_NONE:
+ ret = HAL_EXTRADATA_NONE;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+ ret = HAL_EXTRADATA_MB_QUANTIZATION;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+ ret = HAL_EXTRADATA_INTERLACE_VIDEO;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP:
+ ret = HAL_EXTRADATA_VC1_FRAMEDISP;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP:
+ ret = HAL_EXTRADATA_VC1_SEQDISP;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+ ret = HAL_EXTRADATA_TIMESTAMP;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+ ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+ ret = HAL_EXTRADATA_FRAME_RATE;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+ ret = HAL_EXTRADATA_PANSCAN_WINDOW;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+ ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD:
+ ret = HAL_EXTRADATA_CLOSED_CAPTION_UD;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_AFD_UD:
+ ret = HAL_EXTRADATA_AFD_UD;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+ ret = HAL_EXTRADATA_MULTISLICE_INFO;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+ ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+ ret = HAL_EXTRADATA_METADATA_FILLER;
+ break;
+ default:
+ dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
+ break;
+ }
+ return ret;
+};
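
/*
 * The resulting control path is a two-step translation (all names
 * taken from this patch):
 *
 *	V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA (menu value)
 *	  -> msm_comm_get_hal_extradata_index()  (v4l2 -> HAL id)
 *	  -> get_hfi_extradata_index()           (HAL id -> HFI property)
 *
 * so e.g. a userspace s_ctrl of V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP
 * ends up programming HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA.
 */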
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 7562058..28bec97 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -41,6 +41,8 @@
enum instance_state state);
int msm_comm_unset_ocmem(struct msm_vidc_core *core);
int msm_comm_free_ocmem(struct msm_vidc_core *core);
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+ enum v4l2_mpeg_vidc_extradata index);
#define IS_PRIV_CTRL(idx) (\
(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
V4L2_CTRL_DRIVER_PRIV(idx))
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index f44be4d..e449821 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -972,6 +972,61 @@
}
return buffer;
}
+
+
+static int get_hfi_extradata_index(enum hal_extradata_id index)
+{
+ int ret = 0;
+ switch (index) {
+ case HAL_EXTRADATA_MB_QUANTIZATION:
+ ret = HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION;
+ break;
+ case HAL_EXTRADATA_INTERLACE_VIDEO:
+ ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VC1_FRAMEDISP:
+ ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_VC1_SEQDISP:
+ ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_TIMESTAMP:
+ ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_S3D_FRAME_PACKING:
+ ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_FRAME_RATE:
+ ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_PANSCAN_WINDOW:
+ ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_RECOVERY_POINT_SEI:
+ ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_CLOSED_CAPTION_UD:
+ ret = HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_AFD_UD:
+ ret = HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA;
+ break;
+ case HAL_EXTRADATA_MULTISLICE_INFO:
+ ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
+ break;
+ case HAL_EXTRADATA_NUM_CONCEALED_MB:
+ ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
+ break;
+ case HAL_EXTRADATA_INDEX:
+ ret = HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG;
+ break;
+ default:
+ dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
+ break;
+ }
+ return ret;
+}
+
int vidc_hal_session_set_property(void *sess,
enum hal_property ptype, void *pdata)
{
@@ -979,6 +1034,7 @@
struct hfi_cmd_session_set_property_packet *pkt =
(struct hfi_cmd_session_set_property_packet *) &packet;
struct hal_session *session;
+ int rc = 0;
if (!sess || !pdata) {
dprintk(VIDC_ERR, "Invalid Params");
@@ -1528,6 +1584,30 @@
hfi_multi_slice_control);
break;
}
+ case HAL_PARAM_INDEX_EXTRADATA:
+ {
+ struct hfi_index_extradata_config *hfi;
+ struct hal_extradata_enable *extra = pdata;
+ int index;
+ /* resolve the HAL extradata id to its HFI property once */
+ index = get_hfi_extradata_index(extra->index);
+ pkt->rg_property_data[0] = index;
+ hfi =
+ (struct hfi_index_extradata_config *)
+ &pkt->rg_property_data[1];
+ hfi->enable = extra->enable;
+ if (index)
+ hfi->index_extra_data_id = index;
+ else {
+ dprintk(VIDC_WARN,
+ "Failed to find extradata index: %d\n",
+ extra->index);
+ rc = -EINVAL;
+ }
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_index_extradata_config);
+ break;
+ }
case HAL_CONFIG_VPE_DEINTERLACE:
break;
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
@@ -1562,9 +1642,10 @@
dprintk(VIDC_INFO, "DEFAULT: Calling 0x%x", ptype);
break;
}
- if (vidc_hal_iface_cmdq_write(session->device, pkt))
- return -ENOTEMPTY;
- return 0;
+ if (!rc)
+ rc = vidc_hal_iface_cmdq_write(session->device, pkt);
+
+ return rc;
}
int vidc_hal_session_get_property(void *sess,
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 8aff5af..9d20a31 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -72,12 +72,32 @@
VIDC_ERR_UNUSED = 0x10000000
};
+enum hal_extradata_id {
+ HAL_EXTRADATA_NONE,
+ HAL_EXTRADATA_MB_QUANTIZATION,
+ HAL_EXTRADATA_INTERLACE_VIDEO,
+ HAL_EXTRADATA_VC1_FRAMEDISP,
+ HAL_EXTRADATA_VC1_SEQDISP,
+ HAL_EXTRADATA_TIMESTAMP,
+ HAL_EXTRADATA_S3D_FRAME_PACKING,
+ HAL_EXTRADATA_FRAME_RATE,
+ HAL_EXTRADATA_PANSCAN_WINDOW,
+ HAL_EXTRADATA_RECOVERY_POINT_SEI,
+ HAL_EXTRADATA_CLOSED_CAPTION_UD,
+ HAL_EXTRADATA_AFD_UD,
+ HAL_EXTRADATA_MULTISLICE_INFO,
+ HAL_EXTRADATA_INDEX,
+ HAL_EXTRADATA_NUM_CONCEALED_MB,
+ HAL_EXTRADATA_METADATA_FILLER,
+};
+
enum hal_property {
HAL_CONFIG_FRAME_RATE = 0x04000001,
HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
+ HAL_PARAM_INDEX_EXTRADATA,
HAL_PARAM_FRAME_SIZE,
HAL_CONFIG_REALTIME,
HAL_PARAM_BUFFER_COUNT_ACTUAL,
@@ -460,6 +480,11 @@
HAL_UNUSED_PICT = 0x10000000,
};
+struct hal_extradata_enable {
+ u32 enable;
+ enum hal_extradata_id index;
+};
+
struct hal_enable_picture {
u32 picture_type;
};
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 82f9e58..5161b7b 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -396,6 +396,7 @@
if (rc == 0 && atomic_read(&dev->vp_enabled) == 1) {
/* This should not happen, if it does hw is stuck */
disable_irq_nosync(dev->vpirq->start);
+ atomic_set(&dev->vp_enabled, 0);
pr_err("%s: VP Timeout and VP still running\n",
__func__);
}
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 46015b0..55ffb17 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -49,6 +49,9 @@
#define QSEOS_VERSION_13 0x13
#define QSEOS_VERSION_14 0x14
#define QSEEE_VERSION_00 0x400000
+#define QSEE_VERSION_01 0x401000
+#define QSEE_VERSION_02 0x402000
+
#define QSEOS_CHECK_VERSION_CMD 0x00001803
@@ -75,6 +78,7 @@
QSEOS_GET_APP_STATE_COMMAND,
QSEOS_LOAD_SERV_IMAGE_COMMAND,
QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+ QSEOS_APP_REGION_NOTIFICATION,
QSEOS_CMD_MAX = 0xEFFFFFFF
};
@@ -89,6 +93,12 @@
CLK_SFPB,
};
+__packed struct qsee_apps_region_info_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t addr;
+ uint32_t size;
+};
+
__packed struct qseecom_check_app_ireq {
uint32_t qsee_cmd_id;
char app_name[MAX_APP_NAME_SIZE];
@@ -2558,6 +2568,32 @@
}
qseecom_platform_support = (struct msm_bus_scale_pdata *)
msm_bus_cl_get_pdata(pdev);
+ if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
+ struct resource *resource = NULL;
+ struct qsee_apps_region_info_ireq req;
+ struct qseecom_command_scm_resp resp;
+
+ resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "secapp-region");
+ if (resource) {
+ req.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
+ req.addr = resource->start;
+ req.size = resource_size(resource);
+ pr_warn("secure app region addr=0x%x size=0x%x\n",
+ req.addr, req.size);
+ } else {
+ pr_err("Fail to get secure app region info\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (rc) {
+ pr_err("Failed to send secapp region info %d\n",
+ rc);
+ goto err;
+ }
+ }
} else {
qseecom_platform_support = (struct msm_bus_scale_pdata *)
pdev->dev.platform_data;
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 34e1d40..75cc086 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -76,4 +76,18 @@
PNP PMIC. It configures the frequency of clkdiv outputs on the
PMIC. These clocks are typically wired through alternate functions
on gpio pins.
+
+config IPA
+ tristate "IPA support"
+ depends on SPS
+ help
+ This driver supports the Internet Packet Accelerator (IPA) core.
+ IPA is a programmable protocol processor HW block.
+ It is designed to support generic HW processing of UL/DL IP packets
+ for various use cases independent of radio technology.
+ The driver supports client connection and configuration
+ for the IPA core.
+ Kernel and user-space processes can call the IPA driver
+ to configure the IPA core.
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 35efd91..0a755d3 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MSM_SSBI) += ssbi.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_IPA) += ipa/
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..ded5b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+ ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
new file mode 100644
index 0000000..0ae2552
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+static struct a2_service_cb_type {
+ void *tx_complete_cb;
+ void *rx_cb;
+ u32 producer_handle;
+ u32 consumer_handle;
+} a2_service_cb;
+
+static struct sps_mem_buffer data_mem_buf[2];
+static struct sps_mem_buffer desc_mem_buf[2];
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+ struct sps_pipe *pipe);
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl);
+
+/**
+ * a2_mux_initialize() - initialize A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ */
+int a2_mux_initialize(void)
+{
+ (void) msm_bam_dmux_ul_power_vote();
+
+ return 0;
+}
+
+/**
+ * a2_mux_close() - close A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ * negative value: failure, as returned by ipa_disconnect()
+ */
+int a2_mux_close(void)
+{
+ int ret = 0;
+
+ (void) msm_bam_dmux_ul_power_unvote();
+
+ ret = ipa_disconnect(a2_service_cb.consumer_handle);
+ if (ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = ipa_disconnect(a2_service_cb.producer_handle);
+ if (ret) {
+ pr_err("%s: ipa_disconnect failure\n", __func__);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
+
+/**
+ * a2_mux_open_port() - open connection to A2
+ * @wwan_logical_channel_id: WWAN logical channel ID
+ * @rx_cb: Rx callback
+ * @tx_complete_cb: Tx completed callback
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+ void *tx_complete_cb)
+{
+ int ret = 0;
+ u8 src_pipe = 0;
+ u8 dst_pipe = 0;
+ struct sps_pipe *a2_to_ipa_pipe = NULL;
+ struct sps_pipe *ipa_to_a2_pipe = NULL;
+
+ (void) wwan_logical_channel_id;
+
+ a2_service_cb.rx_cb = rx_cb;
+ a2_service_cb.tx_complete_cb = tx_complete_cb;
+
+ ret = connect_pipe_ipa(A2_TO_IPA,
+ &src_pipe,
+ &(a2_service_cb.consumer_handle),
+ a2_to_ipa_pipe);
+ if (ret) {
+ pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
+ goto bail;
+ }
+
+ ret = connect_pipe_ipa(IPA_TO_A2,
+ &dst_pipe,
+ &(a2_service_cb.producer_handle),
+ ipa_to_a2_pipe);
+ if (ret) {
+ pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
+ sps_disconnect(a2_to_ipa_pipe);
+ sps_free_endpoint(a2_to_ipa_pipe);
+ (void) ipa_disconnect(a2_service_cb.consumer_handle);
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+
+ return ret;
+}
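
/*
 * Illustrative caller of a2_mux_open_port(); the callback signatures
 * below are hypothetical, since rx_cb/tx_complete_cb are opaque void
 * pointers in this API:
 */
static void wwan_rx_cb(void *priv, void *data, int len) { /* consume */ }
static void wwan_tx_done_cb(void *priv, void *data) { /* reclaim */ }

	ret = a2_mux_open_port(0 /* channel id, currently unused */,
			       wwan_rx_cb, wwan_tx_done_cb);
	if (ret)
		pr_err("a2_mux_open_port failed %d\n", ret);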
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+ u8 *usb_pipe_idx,
+ u32 *clnt_hdl,
+ struct sps_pipe *pipe)
+{
+ int ret;
+ struct sps_connect connection = {0, };
+ u32 a2_handle = 0;
+ u32 a2_phy_addr = 0;
+ struct a2_mux_pipe_connection pipe_connection = { 0, };
+ struct ipa_connect_params ipa_in_params;
+ struct ipa_sps_params sps_out_params;
+
+ memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+ memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+	if (!usb_pipe_idx || !clnt_hdl || !pipe) {
+		pr_err("%s: null arguments\n", __func__);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+ ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
+ if (ret) {
+ pr_err("ipa_get_a2_mux_pipe_info failed\n");
+ goto bail;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ a2_phy_addr = pipe_connection.src_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+ ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+ ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+		pr_debug("%s: pipe_connection->src_pipe_index = %d\n",
+			__func__, pipe_connection.src_pipe_index);
+ ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
+ } else {
+ a2_phy_addr = pipe_connection.dst_phy_addr;
+ ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+ ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
+ }
+
+ ret = sps_phy2h(a2_phy_addr, &a2_handle);
+ if (ret) {
+ pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.client_bam_hdl = a2_handle;
+ ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
+ ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
+
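+	/*
+	 * When the target provides dedicated SPS pipe memory, carve the
+	 * data and descriptor FIFOs out of it; otherwise they are set up
+	 * in system memory as part of the connect sequence.
+	 */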
+ if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
+ pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
+ ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
+ pipe_connection.data_fifo_base_offset,
+ pipe_connection.data_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: data fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
+ pipe_connection.desc_fifo_base_offset,
+ pipe_connection.desc_fifo_size, 1);
+ if (ret) {
+ pr_err("%s: desc. fifo setup failure %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ ipa_in_params.data = data_mem_buf[pipe_dir];
+ ipa_in_params.desc = desc_mem_buf[pipe_dir];
+ }
+
+ ret = a2_ipa_connect_pipe(&ipa_in_params,
+ &sps_out_params,
+ clnt_hdl);
+ if (ret) {
+		pr_err("%s: a2_ipa_connect_pipe failed\n", __func__);
+ goto bail;
+ }
+
+	*pipe = sps_alloc_endpoint();
+	if (*pipe == NULL) {
+		pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+		ret = -ENOMEM;
+		goto a2_ipa_connect_pipe_failed;
+	}
+
+	ret = sps_get_config(*pipe, &connection);
+ if (ret) {
+ pr_err("%s: tx get config failed %d\n", __func__, ret);
+ goto get_config_failed;
+ }
+
+ if (pipe_dir == A2_TO_IPA) {
+ connection.mode = SPS_MODE_SRC;
+ *usb_pipe_idx = connection.src_pipe_index;
+ connection.source = a2_handle;
+ connection.destination = sps_out_params.ipa_bam_hdl;
+ connection.src_pipe_index = pipe_connection.src_pipe_index;
+ connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
+ } else {
+ connection.mode = SPS_MODE_DEST;
+ *usb_pipe_idx = connection.dest_pipe_index;
+ connection.source = sps_out_params.ipa_bam_hdl;
+ connection.destination = a2_handle;
+ connection.src_pipe_index = sps_out_params.ipa_ep_idx;
+ connection.dest_pipe_index = pipe_connection.dst_pipe_index;
+ }
+
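+	/* event generation threshold (in descriptors) for the SPS connection */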
+ connection.event_thresh = 16;
+ connection.data = sps_out_params.data;
+ connection.desc = sps_out_params.desc;
+
+	ret = sps_connect(*pipe, &connection);
+ if (ret < 0) {
+ pr_err("%s: tx connect error %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = 0;
+ goto bail;
+error:
+	sps_disconnect(*pipe);
+get_config_failed:
+	sps_free_endpoint(*pipe);
+a2_ipa_connect_pipe_failed:
+ (void) ipa_disconnect(*clnt_hdl);
+bail:
+ return ret;
+}
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+ struct ipa_sps_params *out_params, u32 *clnt_hdl)
+{
+ return ipa_connect(in_params, out_params, clnt_hdl);
+}
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
new file mode 100644
index 0000000..80885da
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _A2_SERVICE_H_
+#define _A2_SERVICE_H_
+
+int a2_mux_initialize(void);
+
+int a2_mux_close(void);
+
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+ void *tx_complete_cb);
+
+#endif /* _A2_SERVICE_H_ */
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
new file mode 100644
index 0000000..8f68ef5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -0,0 +1,1790 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_READ_MAX (16)
+#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
+				(x) == IPA_MODE_MOBILE_AP_WAN || \
+				(x) == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
+#define IPA_DMA_POOL_SIZE (512)
+#define IPA_DMA_POOL_ALIGNMENT (4)
+#define IPA_DMA_POOL_BOUNDARY (1024)
+#define WLAN_AMPDU_TX_EP (15)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+struct ipa_plat_drv_res {
+ u32 ipa_mem_base;
+ u32 ipa_mem_size;
+ u32 bam_mem_base;
+ u32 bam_mem_size;
+ u32 ipa_irq;
+ u32 bam_irq;
+ u32 ipa_pipe_mem_start_ofst;
+ u32 ipa_pipe_mem_size;
+ struct a2_mux_pipe_connection a2_to_ipa_pipe;
+ struct a2_mux_pipe_connection ipa_to_a2_pipe;
+};
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+static struct of_device_id ipa_plat_drv_match[] = {
+ {
+ .compatible = "qcom,ipa",
+ },
+
+ {
+ }
+};
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct device *ipa_dev;
+
+struct ipa_context *ipa_ctx;
+
+static bool polling_mode;
+module_param(polling_mode, bool, 0644);
+MODULE_PARM_DESC(polling_mode,
+ "1 - pure polling mode; 0 - interrupt+polling mode");
+static uint polling_delay_ms = 50;
+module_param(polling_delay_ms, uint, 0644);
+MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");
+static bool hdr_tbl_lcl = 1;
+module_param(hdr_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
+static bool ip4_rt_tbl_lcl = 1;
+module_param(ip4_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_rt_tbl_lcl,
+ "where ip4 rt tables reside 1-local; 0-system");
+static bool ip6_rt_tbl_lcl = 1;
+module_param(ip6_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_rt_tbl_lcl,
+ "where ip6 rt tables reside 1-local; 0-system");
+static bool ip4_flt_tbl_lcl = 1;
+module_param(ip4_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_flt_tbl_lcl,
+ "where ip4 flt tables reside 1-local; 0-system");
+static bool ip6_flt_tbl_lcl = 1;
+module_param(ip6_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_flt_tbl_lcl,
+ "where ip6 flt tables reside 1-local; 0-system");
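+
+/*
+ * Example (assuming the driver is built as a module named "ipa"):
+ *   insmod ipa.ko polling_mode=1 polling_delay_ms=25
+ * The same parameters can be changed at runtime under
+ * /sys/module/ipa/parameters/ where the 0644 permissions allow it.
+ */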
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+ enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pdata);
+
+static int ipa_update_connections_info(struct device_node *node,
+ struct a2_mux_pipe_connection *pipe_connection);
+
+static void ipa_set_aggregation_params(void);
+
+static ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ u32 reg_val = 0xfeedface;
+ char str[IPA_READ_MAX];
+ int result;
+ static int read_cnt;
+
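+	/* read_cnt is static: the version register is reported only once */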
+ if (read_cnt) {
+ IPAERR("only supports one call to read\n");
+ return 0;
+ }
+
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+ result = scnprintf(str, IPA_READ_MAX, "%x\n", reg_val);
+ if (copy_to_user(buf, str, result))
+ return -EFAULT;
+ read_cnt = 1;
+
+ return result;
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_context *ctx = NULL;
+
+ IPADBG("ENTER\n");
+ ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+ filp->private_data = ctx;
+
+ return 0;
+}
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ u32 pyld_sz;
+ u8 header[128] = { 0 };
+ u8 *param = NULL;
+ struct ipa_ioc_nat_alloc_mem nat_mem;
+ struct ipa_ioc_v4_nat_init nat_init;
+ struct ipa_ioc_v4_nat_del nat_del;
+
+ IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case IPA_IOC_ALLOC_NAT_MEM:
+ if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (allocate_nat_device(&nat_mem)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_V4_INIT_NAT:
+ if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_init))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_nat_init_cmd(&nat_init)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
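+	/*
+	 * The variable-length commands below copy the fixed-size header
+	 * first to learn the entry count, then re-copy the full payload
+	 * into a buffer of the computed size.
+	 */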
+ case IPA_IOC_NAT_DMA:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ pyld_sz =
+ sizeof(struct ipa_ioc_nat_dma_cmd) +
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+ sizeof(struct ipa_ioc_nat_dma_one);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_V4_DEL_NAT:
+ if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+ sizeof(struct ipa_ioc_v4_nat_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_nat_del_cmd(&nat_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_hdr) +
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+ sizeof(struct ipa_hdr_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_hdr) +
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+ sizeof(struct ipa_hdr_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule) +
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+ sizeof(struct ipa_rt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_RT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_rt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_rt_rule) +
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+ sizeof(struct ipa_rt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_flt_rule) +
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+ sizeof(struct ipa_flt_rule_add);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_FLT_RULE:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_del_flt_rule))) {
+ retval = -EFAULT;
+ break;
+ }
+ pyld_sz =
+ sizeof(struct ipa_ioc_del_flt_rule) +
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+ sizeof(struct ipa_flt_rule_del);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_COMMIT_HDR:
+ retval = ipa_commit_hdr();
+ break;
+ case IPA_IOC_RESET_HDR:
+ retval = ipa_reset_hdr();
+ break;
+ case IPA_IOC_COMMIT_RT:
+ retval = ipa_commit_rt(arg);
+ break;
+ case IPA_IOC_RESET_RT:
+ retval = ipa_reset_rt(arg);
+ break;
+ case IPA_IOC_COMMIT_FLT:
+ retval = ipa_commit_flt(arg);
+ break;
+ case IPA_IOC_RESET_FLT:
+ retval = ipa_reset_flt(arg);
+ break;
+ case IPA_IOC_DUMP:
+ ipa_dump();
+ break;
+ case IPA_IOC_GET_RT_TBL:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_RT_TBL:
+ retval = ipa_put_rt_tbl(arg);
+ break;
+ case IPA_IOC_GET_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_get_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_PUT_HDR:
+ retval = ipa_put_hdr(arg);
+ break;
+ case IPA_IOC_SET_FLT:
+ retval = ipa_cfg_filter(arg);
+ break;
+ case IPA_IOC_COPY_HDR:
+ if (copy_from_user(header, (u8 *)arg,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((u8 *)arg, header,
+ sizeof(struct ipa_ioc_copy_hdr))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ default: /* redundant, as cmd was checked against MAXNR */
+ return -ENOTTY;
+ }
+ kfree(param);
+
+ return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_rt_rule_add *rt_rule_entry;
+
+ rt_rule =
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ if (!rt_rule) {
+ IPAERR("fail to alloc mem\n");
+ return -ENOMEM;
+ }
+ /* setup a default v4 route to point to A5 */
+ rt_rule->num_rules = 1;
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+ IPA_RESOURCE_NAME_MAX);
+
+ rt_rule_entry = &rt_rule->rules[0];
+ rt_rule_entry->at_rear = 1;
+ rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+ if (ipa_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v4 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /* setup a default v6 route to point to A5 */
+ rt_rule->ip = IPA_IP_v6;
+ if (ipa_add_rt_rule(rt_rule)) {
+ IPAERR("fail to add dflt v6 rule\n");
+ kfree(rt_rule);
+ return -EPERM;
+ }
+ IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+ ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+ /*
+ * because these tables are the very first to be added, they will both
+ * have the same index (0) which is essential for programming the
+ * "route" end-point config
+ */
+
+ kfree(rt_rule);
+
+ return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_hdr_add *hdr_entry;
+ struct ipa_route route = { 0 };
+ int ret;
+
+ /* install the basic exception header */
+ hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+ sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdr) {
+ IPAERR("fail to alloc exception hdr\n");
+ return -ENOMEM;
+ }
+ hdr->num_hdrs = 1;
+ hdr->commit = 1;
+ hdr_entry = &hdr->hdr[0];
+ strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+
+ /*
+ * only single stream for MBIM supported and no exception packets
+ * expected so set default header to zero
+ */
+ hdr_entry->hdr_len = 1;
+ hdr_entry->hdr[0] = 0;
+
+	/*
+	 * SW does not know anything about the default exception header, so
+	 * it is not set here; IPA HW will use it as a template
+	 */
+ if (ipa_add_hdr(hdr)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ if (hdr_entry->status) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* exception packets go to the LAN-WAN pipe from IPA to A5 */
+ route.route_def_pipe = IPA_A5_LAN_WAN_IN;
+ route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+ if (ipa_cfg_route(&route)) {
+ IPAERR("fail to add exception hdr\n");
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ kfree(hdr);
+ return ret;
+}
+
+static void ipa_handle_tx_poll_for_pipe(struct ipa_sys_context *sys)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt, *t;
+ struct sps_iovec iov;
+ unsigned long irq_flags;
+ int ret;
+
+ while (1) {
+ iov.addr = 0;
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ pr_err("%s: sps_get_iovec failed %d\n", __func__, ret);
+ break;
+ }
+ if (!iov.addr)
+ break;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ tx_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper, link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ switch (tx_pkt->cnt) {
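+		/* cnt == 1: a single-descriptor transfer, complete it now */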
+ case 1:
+ ipa_write_done(&tx_pkt->work);
+ break;
+ case 0xFFFF:
+ /* reached end of set */
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_for_each_entry_safe(tx_pkt, t,
+ &sys->wait_desc_list, link) {
+ list_del(&tx_pkt->link);
+ list_add(&tx_pkt->link, &sys->head_desc_list);
+ }
+ tx_pkt =
+ list_first_entry(&sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper, link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ ipa_write_done(&tx_pkt->work);
+ break;
+ default:
+ /* keep looping till reach the end of the set */
+ spin_lock_irqsave(&sys->spinlock,
+ irq_flags);
+ list_del(&tx_pkt->link);
+ list_add_tail(&tx_pkt->link,
+ &sys->wait_desc_list);
+ spin_unlock_irqrestore(&sys->spinlock,
+ irq_flags);
+ break;
+ }
+ }
+}
+
+static void ipa_poll_function(struct work_struct *work)
+{
+ int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
+ IPA_A5_WLAN_AMPDU_OUT };
+ int i;
+ int num_tx_pipes;
+
+ /* check all the system pipes for tx completions and rx available */
+ if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
+ ipa_handle_rx_core();
+
+	num_tx_pipes = ARRAY_SIZE(tx_pipes);
+
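+	/* the WLAN AMPDU pipe (last entry above) exists only in mobile AP modes */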
+ if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
+ num_tx_pipes--;
+
+ for (i = 0; i < num_tx_pipes; i++)
+ if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
+ ipa_handle_tx_poll_for_pipe(&ipa_ctx->sys[tx_pipes[i]]);
+
+	/* re-post the poll work; it was initialized in ipa_setup_a5_pipes() */
+	schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
+			msecs_to_jiffies(polling_delay_ms));
+}
+
+static int ipa_setup_a5_pipes(void)
+{
+ struct ipa_sys_connect_params sys_in;
+ int result = 0;
+
+ /* CMD OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_CMD_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail;
+ }
+
+ if (ipa_setup_exception_path()) {
+ IPAERR(":fail to setup excp path\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+
+ /* LAN-WAN IN (IPA->A5) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+ sys_in.ipa_ep_cfg.hdr.hdr_len = 8; /* size of A5 exception hdr */
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_cmd;
+ }
+ /* LAN-WAN OUT (A5->IPA) */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+ IPAERR(":setup sys pipe failed.\n");
+ result = -EPERM;
+ goto fail_data_out;
+ }
+ if (ipa_ctx->polling_mode) {
+ INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+ result =
+ schedule_delayed_work_on(smp_processor_id(),
+ &ipa_ctx->poll_work,
+ msecs_to_jiffies(polling_delay_ms));
+		if (!result) {
+			IPAERR(":schedule delayed work failed.\n");
+			result = -EPERM;
+			goto fail_schedule_delayed_work;
+		}
+ }
+
+ return 0;
+
+fail_schedule_delayed_work:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+fail_data_out:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_cmd:
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail:
+ return result;
+}
+
+static void ipa_teardown_a5_pipes(void)
+{
+ cancel_delayed_work(&ipa_ctx->poll_work);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+ ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				enum a2_mux_pipe_direction pipe_dir,
+				struct a2_mux_pipe_connection *pdata)
+{
+	struct device_node *node;
+	int rc = -EINVAL;
+
+	if (!pdata || !pdev)
+		goto err;
+
+	/* retrieve device tree parameters */
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		const char *str;
+
+		rc = of_property_read_string(node, "label", &str);
+		if (rc) {
+			IPAERR("Cannot read string\n");
+			goto err;
+		}
+
+		/* only a2-to-ipa and ipa-to-a2 connections are supported */
+		if (strncmp(str, "a2-to-ipa", 10) &&
+		    strncmp(str, "ipa-to-a2", 10)) {
+			rc = -EINVAL;
+			goto err;
+		}
+
+		/* skip nodes that do not match the requested direction */
+		if (!strncmp(str, "a2-to-ipa", 10) && pipe_dir == IPA_TO_A2)
+			continue;
+		if (!strncmp(str, "ipa-to-a2", 10) && pipe_dir == A2_TO_IPA)
+			continue;
+
+		rc = ipa_update_connections_info(node, pdata);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	IPAERR("%s: failed\n", __func__);
+
+	return rc;
+}
+
+static int ipa_update_connections_info(struct device_node *node,
+ struct a2_mux_pipe_connection *pipe_connection)
+{
+	int rc;
+	char *key;
+	uint32_t val;
+
+	if (!pipe_connection || !node)
+		return -EINVAL;
+
+ key = "qcom,src-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_phy_addr = val;
+
+	key = "qcom,ipa-bam-mem-type";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->mem_type = val;
+
+ key = "qcom,src-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_pipe_index = val;
+
+ key = "qcom,dst-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_phy_addr = val;
+
+ key = "qcom,dst-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_pipe_index = val;
+
+ key = "qcom,data-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_base_offset = val;
+
+ key = "qcom,data-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_size = val;
+
+ key = "qcom,descriptor-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->desc_fifo_base_offset = val;
+
+ key = "qcom,descriptor-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+
+ pipe_connection->desc_fifo_size = val;
+
+ return 0;
+err:
+ IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+
+ return rc;
+}
+
+/**
+* ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
+*
+* @pipe_dir: pipe direction
+* @pipe_connect: connect structure containing the parameters fetched from DTS
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pipe_connect)
+{
+ if (!pipe_connect) {
+		IPAERR("%s: null pipe_connect\n", __func__);
+ return -EFAULT;
+ }
+
+ switch (pipe_dir) {
+ case A2_TO_IPA:
+ *pipe_connect = ipa_res.a2_to_ipa_pipe;
+ break;
+ case IPA_TO_A2:
+ *pipe_connect = ipa_res.ipa_to_a2_pipe;
+ break;
+ default:
+		IPAERR("%s: invalid pipe direction %d\n", __func__, pipe_dir);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void ipa_set_aggregation_params(void)
+{
+ struct ipa_ep_cfg_aggr agg_params;
+ u32 producer_hdl = 0;
+ u32 consumer_hdl = 0;
+
+ rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+
+ agg_params.aggr = ipa_ctx->aggregation_type;
+ agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
+ agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
+
+ /* configure aggregation on producer */
+ agg_params.aggr_en = IPA_ENABLE_AGGR;
+ ipa_cfg_ep_aggr(producer_hdl, &agg_params);
+
+ /* configure deaggregation on consumer */
+ agg_params.aggr_en = IPA_ENABLE_DEAGGR;
+ ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
+}
+
+/*
+ * The following device attributes are for configuring the aggregation
+ * attributes when the driver is already running.
+ * The attributes are for configuring the aggregation type
+ * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
+ * time limit.
+ */
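+/*
+ * Example (the sysfs path depends on the platform device name):
+ *   echo MBIM_16 > /sys/.../aggregation_type
+ *   cat /sys/.../aggregation_byte_limit
+ */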
+static ssize_t ipa_show_aggregation_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+ char str[IPA_AGGR_MAX_STR_LENGTH];
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
+ return -EINVAL;
+ }
+
+ memset(str, 0, sizeof(str));
+
+ switch (ipa_ctx->aggregation_type) {
+ case IPA_MBIM_16:
+ strlcpy(str, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16"));
+ break;
+ case IPA_MBIM_32:
+ strlcpy(str, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32"));
+ break;
+ case IPA_TLP:
+ strlcpy(str, "TLP", IPA_AGGR_STR_IN_BYTES("TLP"));
+ break;
+ default:
+ strlcpy(str, "NONE", IPA_AGGR_STR_IN_BYTES("NONE"));
+ break;
+ }
+
+ ret_val = scnprintf(buf, PAGE_SIZE, "%s\n", str);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+ if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
+ ipa_ctx->aggregation_type = IPA_MBIM_32;
+ else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
+ ipa_ctx->aggregation_type = IPA_TLP;
+ else {
+ IPAERR("ipa_store_aggregation_type wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_type,
+ ipa_store_aggregation_type);
+
+static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ ret_val = scnprintf(buf, PAGE_SIZE, "%u\n",
+ ipa_ctx->aggregation_byte_limit);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH];
+ char *pstr;
+ u32 ret = 0;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+	/* the second argument of kstrtouint() is the numeric base, not a length */
+	if (kstrtouint(pstr, 10, &ret)) {
+ IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_ctx->aggregation_byte_limit = ret;
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_byte_limit,
+ ipa_store_aggregation_byte_limit);
+
+static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret_val;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ ret_val = scnprintf(buf,
+ PAGE_SIZE,
+ "%u\n",
+ ipa_ctx->aggregation_time_limit);
+
+ return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+ u32 ret = 0;
+
+ if (!buf) {
+ IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
+ return -EINVAL;
+ }
+
+ strlcpy(str, buf, sizeof(str));
+ pstr = strim(str);
+
+	/* the second argument of kstrtouint() is the numeric base, not a length */
+	if (kstrtouint(pstr, 10, &ret)) {
+ IPAERR("ipa_store_aggregation_time_limit wrong input\n");
+ return -EINVAL;
+ }
+
+ ipa_ctx->aggregation_time_limit = ret;
+
+ ipa_set_aggregation_params();
+
+ return count;
+}
+
+static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
+ ipa_show_aggregation_time_limit,
+ ipa_store_aggregation_time_limit);
+
+static const struct file_operations ipa_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_open,
+ .read = ipa_read,
+ .unlocked_ioctl = ipa_ioctl,
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+ ipa_cnoc_clk = clk_get(dev, "iface_clk");
+ if (IS_ERR(ipa_cnoc_clk)) {
+ ipa_cnoc_clk = NULL;
+ IPAERR("fail to get cnoc clk\n");
+ return -ENODEV;
+ }
+
+ ipa_clk_src = clk_get(dev, "core_src_clk");
+ if (IS_ERR(ipa_clk_src)) {
+ ipa_clk_src = NULL;
+ IPAERR("fail to get ipa clk src\n");
+ return -ENODEV;
+ }
+
+ ipa_clk = clk_get(dev, "core_clk");
+ if (IS_ERR(ipa_clk)) {
+ ipa_clk = NULL;
+ IPAERR("fail to get ipa clk\n");
+ return -ENODEV;
+ }
+
+ sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+ if (IS_ERR(sys_noc_ipa_axi_clk)) {
+ sys_noc_ipa_axi_clk = NULL;
+ IPAERR("fail to get sys_noc_ipa_axi clk\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
+ if (ipa_cnoc_clk) {
+ clk_prepare(ipa_cnoc_clk);
+ clk_enable(ipa_cnoc_clk);
+ clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+ } else {
+ WARN_ON(1);
+ }
+
+ if (ipa_clk_src)
+ clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_prepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_prepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_enable(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (sys_noc_ipa_axi_clk)
+ clk_enable(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+ if (sys_noc_ipa_axi_clk)
+ clk_disable_unprepare(sys_noc_ipa_axi_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_clk)
+ clk_disable_unprepare(ipa_clk);
+ else
+ WARN_ON(1);
+
+ if (ipa_cnoc_clk)
+ clk_disable_unprepare(ipa_cnoc_clk);
+ else
+ WARN_ON(1);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+ void *bam_cnfg_bits;
+
+ bam_cnfg_bits = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+ IPA_BAM_REMAP_SIZE);
+ if (!bam_cnfg_bits)
+ return -ENOMEM;
+ ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
+ IPA_BAM_CNFG_BITS_VAL);
+ iounmap(bam_cnfg_bits);
+
+ return 0;
+}
+
+/**
+* ipa_init() - Initialize the IPA driver
+* @resource_p: contains platform-specific values taken from the DTS file
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize ipa_ctx with:
+*	1) values parsed from the DTS file
+*	2) parameters passed to the module initialization
+*	3) HW values read at init time (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart IPA core (HW reset)
+* - Register IPA BAM with the SPS driver and get a BAM handle
+* - Set configuration for IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+*   routing and IPA-tree entries
+* - Create a DMA memory pool (4 objects, 512 bytes each) used for
+*   TX (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory)
+* - Initialize spinlocks (for the lists related to A5<->IPA pipes)
+* - Initialize two single-threaded workqueues named "ipa rx wq" and
+*   "ipa tx wq"
+* - Initialize red-black trees for the handles of headers, routing rules,
+*   routing tables and filtering rules
+* - Set up all A5<->IPA pipes by calling ipa_setup_a5_pipes()
+* - Prepare the descriptors for the system pipes
+* - Initialize the filter block by committing IPv4 and IPv6 default rules
+* - Create an empty routing table in system memory (not committed)
+* - Initialize the pipe memory pool via ipa_pipe_mem_init() on supported
+*   platforms
+* - Create a char device for IPA
+*/
+static int ipa_init(const struct ipa_plat_drv_res *resource_p)
+{
+ int result = 0;
+ int i;
+ struct sps_bam_props bam_props = { 0 };
+ struct ipa_flt_tbl *flt_tbl;
+ struct ipa_rt_tbl_set *rset;
+
+ IPADBG("IPA init\n");
+
+ ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+ if (!ipa_ctx) {
+ IPAERR(":kzalloc err.\n");
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+
+ IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
+ ipa_ctx->polling_mode = polling_mode;
+ IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+ hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
+ ip6_flt_tbl_lcl);
+ ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
+ ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
+ ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
+ ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
+ ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
+
+ ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+
+ /* setup IPA register access */
+ ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
+ resource_p->ipa_mem_size);
+ if (!ipa_ctx->mmio) {
+ IPAERR(":ipa-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_remap;
+ }
+ /* do POR programming to setup HW */
+ result = ipa_init_hw();
+ if (result) {
+ IPAERR(":error initializing driver.\n");
+ result = -ENODEV;
+ goto fail_init_hw;
+ }
+ /* read how much SRAM is available for SW use */
+ ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+ IPA_SHARED_MEM_SIZE_OFST);
+
+ if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
+		IPAERR("SW expects more core memory: needed %d, avail %d\n",
+ IPA_RAM_END_OFST, ipa_ctx->smem_sz);
+ result = -ENOMEM;
+ goto fail_init_hw;
+ }
+ /* register IPA with SPS driver */
+ bam_props.phys_addr = resource_p->bam_mem_base;
+ bam_props.virt_addr = ioremap(resource_p->bam_mem_base,
+ resource_p->bam_mem_size);
+ if (!bam_props.virt_addr) {
+ IPAERR(":bam-base ioremap err.\n");
+ result = -EFAULT;
+ goto fail_bam_remap;
+ }
+ bam_props.virt_size = resource_p->bam_mem_size;
+ bam_props.irq = resource_p->bam_irq;
+ bam_props.num_pipes = IPA_NUM_PIPES;
+ bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+ bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+
+ result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+ if (result) {
+ IPAERR(":bam register err.\n");
+ result = -ENODEV;
+ goto fail_bam_register;
+ }
+
+ if (ipa_setup_bam_cfg(resource_p)) {
+ IPAERR(":bam cfg err.\n");
+ result = -ENODEV;
+ goto fail_flt_rule_cache;
+ }
+
+ /* set up the default op mode */
+ ipa_ctx->mode = IPA_MODE_USB_DONGLE;
+
+ /* init the lookaside cache */
+ ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
+ sizeof(struct ipa_flt_entry), 0, 0, NULL);
+ if (!ipa_ctx->flt_rule_cache) {
+ IPAERR(":ipa flt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_flt_rule_cache;
+ }
+ ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
+ sizeof(struct ipa_rt_entry), 0, 0, NULL);
+ if (!ipa_ctx->rt_rule_cache) {
+ IPAERR(":ipa rt cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_rule_cache;
+ }
+ ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
+ sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+ if (!ipa_ctx->hdr_cache) {
+ IPAERR(":ipa hdr cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_cache;
+ }
+ ipa_ctx->hdr_offset_cache =
+ kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
+ 0, 0, NULL);
+ if (!ipa_ctx->hdr_offset_cache) {
+ IPAERR(":ipa hdr off cache create failed\n");
+ result = -ENOMEM;
+ goto fail_hdr_offset_cache;
+ }
+ ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
+ sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+ if (!ipa_ctx->rt_tbl_cache) {
+ IPAERR(":ipa rt tbl cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rt_tbl_cache;
+ }
+ ipa_ctx->tx_pkt_wrapper_cache =
+ kmem_cache_create("IPA TX PKT WRAPPER",
+ sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->tx_pkt_wrapper_cache) {
+ IPAERR(":ipa tx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tx_pkt_wrapper_cache;
+ }
+ ipa_ctx->rx_pkt_wrapper_cache =
+ kmem_cache_create("IPA RX PKT WRAPPER",
+ sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+ if (!ipa_ctx->rx_pkt_wrapper_cache) {
+ IPAERR(":ipa rx pkt wrapper cache create failed\n");
+ result = -ENOMEM;
+ goto fail_rx_pkt_wrapper_cache;
+ }
+ ipa_ctx->tree_node_cache =
+ kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
+ NULL);
+ if (!ipa_ctx->tree_node_cache) {
+ IPAERR(":ipa tree node cache create failed\n");
+ result = -ENOMEM;
+ goto fail_tree_node_cache;
+ }
+
+ /*
+ * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
+ * size 512 bytes
+ */
+ ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k", NULL,
+ IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
+ IPA_DMA_POOL_BOUNDARY);
+ if (!ipa_ctx->one_kb_no_straddle_pool) {
+ IPAERR("cannot setup 1kb alloc DMA pool.\n");
+ result = -ENOMEM;
+ goto fail_dma_pool;
+ }
+
+ ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+ ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+ /* init the various list heads */
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+ INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+ }
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+ flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+ INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+ flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+ }
+
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+ mutex_init(&ipa_ctx->lock);
+ mutex_init(&ipa_ctx->nat_mem.lock);
+
+ for (i = 0; i < IPA_A5_SYS_MAX; i++) {
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
+ spin_lock_init(&ipa_ctx->sys[i].spinlock);
+ if (i != IPA_A5_WLAN_AMPDU_OUT)
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
+ else
+ ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
+ INIT_LIST_HEAD(&ipa_ctx->sys[i].wait_desc_list);
+ }
+
+ ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
+ if (!ipa_ctx->rx_wq) {
+ IPAERR(":fail to create rx wq\n");
+ result = -ENOMEM;
+ goto fail_rx_wq;
+ }
+
+ ipa_ctx->tx_wq = create_singlethread_workqueue("ipa tx wq");
+ if (!ipa_ctx->tx_wq) {
+ IPAERR(":fail to create tx wq\n");
+ result = -ENOMEM;
+ goto fail_tx_wq;
+ }
+
+ ipa_ctx->hdr_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
+ ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
+ ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
+
+ atomic_set(&ipa_ctx->ipa_active_clients, 0);
+
+ result = ipa_bridge_init();
+ if (result) {
+ IPAERR("ipa bridge init err.\n");
+ result = -ENODEV;
+ goto fail_bridge_init;
+ }
+
+ /* setup the A5-IPA pipes */
+ if (ipa_setup_a5_pipes()) {
+ IPAERR(":failed to setup IPA-A5 pipes.\n");
+ result = -ENODEV;
+ goto fail_a5_pipes;
+ }
+
+ ipa_replenish_rx_cache();
+
+ /* init the filtering block */
+ ipa_commit_flt(IPA_IP_v4);
+ ipa_commit_flt(IPA_IP_v6);
+
+ /*
+ * setup an empty routing table in system memory, this will be used
+ * to delete a routing table cleanly and safely
+ */
+ ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+ ipa_ctx->empty_rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+ &ipa_ctx->empty_rt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!ipa_ctx->empty_rt_tbl_mem.base) {
+ IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+ ipa_ctx->empty_rt_tbl_mem.size);
+ result = -ENOMEM;
+ goto fail_empty_rt_tbl;
+ }
+ memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+ ipa_ctx->empty_rt_tbl_mem.size);
+
+ /* setup the IPA pipe mem pool */
+ ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+ resource_p->ipa_pipe_mem_size);
+
+	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(ipa_ctx->class)) {
+		IPAERR("class_create err.\n");
+		result = -ENODEV;
+		goto fail_class_create;
+	}
+
+	result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+ ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+ ipa_ctx, DRV_NAME);
+ if (IS_ERR(ipa_ctx->dev)) {
+ IPAERR(":device_create err.\n");
+ result = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+ ipa_ctx->cdev.owner = THIS_MODULE;
+ ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */
+
+ result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ /* default aggregation parameters */
+ ipa_ctx->aggregation_type = IPA_MBIM_16;
+ ipa_ctx->aggregation_byte_limit = 1;
+ ipa_ctx->aggregation_time_limit = 0;
+ IPADBG(":IPA driver init OK.\n");
+
+ /* gate IPA clocks */
+ ipa_disable_clks();
+
+ return 0;
+
+fail_cdev_add:
+ device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	class_destroy(ipa_ctx->class);
+fail_class_create:
+	if (ipa_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+ dma_free_coherent(NULL,
+ ipa_ctx->empty_rt_tbl_mem.size,
+ ipa_ctx->empty_rt_tbl_mem.base,
+ ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_empty_rt_tbl:
+ ipa_cleanup_rx();
+ ipa_teardown_a5_pipes();
+fail_a5_pipes:
+ ipa_bridge_cleanup();
+fail_bridge_init:
+ destroy_workqueue(ipa_ctx->tx_wq);
+fail_tx_wq:
+ destroy_workqueue(ipa_ctx->rx_wq);
+fail_rx_wq:
+ dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+fail_dma_pool:
+ kmem_cache_destroy(ipa_ctx->tree_node_cache);
+fail_tree_node_cache:
+ kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+ kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+ kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+ kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+ kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+ sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_bam_register:
+ iounmap(bam_props.virt_addr);
+fail_bam_remap:
+fail_init_hw:
+ iounmap(ipa_ctx->mmio);
+fail_remap:
+ kfree(ipa_ctx);
+ ipa_ctx = NULL;
+fail_mem:
+ /* gate IPA clocks */
+ ipa_disable_clks();
+ return result;
+}
+
+static int ipa_plat_drv_probe(struct platform_device *pdev_p)
+{
+ int result = 0;
+	struct resource *resource_p;
+
+	IPADBG("IPA plat drv probe\n");
+
+ /* initialize ipa_res */
+ ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+ ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+
+ result = ipa_load_pipe_connection(pdev_p,
+ A2_TO_IPA,
+ &ipa_res.a2_to_ipa_pipe);
+	if (result)
+ IPAERR(":ipa_load_pipe_connection failed!\n");
+
+ result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
+ &ipa_res.ipa_to_a2_pipe);
+	if (result)
+ IPAERR(":ipa_load_pipe_connection failed!\n");
+
+ /* Get IPA wrapper address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "ipa-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for ipa-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.ipa_mem_base = resource_p->start;
+ ipa_res.ipa_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA BAM address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "bam-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for bam-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.bam_mem_base = resource_p->start;
+ ipa_res.bam_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA pipe mem start ofst */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "ipa-pipe-mem");
+
+ if (!resource_p) {
+ IPADBG(":get resource failed for ipa-pipe-mem\n");
+ } else {
+ ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
+ ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
+ }
+
+ /* Get IPA IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "ipa-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for ipa-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.ipa_irq = resource_p->start;
+ }
+
+ /* Get IPA BAM IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "bam-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for bam-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.bam_irq = resource_p->start;
+ }
+
+ IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
+ ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
+ IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
+ ipa_res.bam_mem_base, ipa_res.bam_mem_size);
+ IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
+ ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
+
+ IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
+ IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
+
+ /* stash the IPA dev ptr */
+ ipa_dev = &pdev_p->dev;
+
+ /* get IPA clocks */
+ if (ipa_get_clks(ipa_dev) != 0)
+ return -ENODEV;
+
+ /* enable IPA clocks */
+ ipa_enable_clks();
+
+ /* Proceed to real initialization */
+ result = ipa_init(&ipa_res);
+ if (result)
+ IPAERR("ipa_init failed\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_type);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_byte_limit);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ result = device_create_file(&pdev_p->dev,
+ &dev_attr_aggregation_time_limit);
+ if (result)
+ IPAERR("failed to create device file\n");
+
+ return result;
+}
+
+static struct platform_driver ipa_plat_drv = {
+ .probe = ipa_plat_drv_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipa_plat_drv_match,
+ },
+};
+
+static int ipa_plat_drv_init(void)
+{
+ return platform_driver_register(&ipa_plat_drv);
+}
+
+struct ipa_context *ipa_get_ctx(void)
+{
+ return ipa_ctx;
+}
+
+static int __init ipa_module_init(void)
+{
+ int result = 0;
+
+ IPADBG("IPA module init\n");
+ ipa_debugfs_init();
+ /* Register as a platform device driver */
+ result = ipa_plat_drv_init();
+
+ return result;
+}
+
+late_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
new file mode 100644
index 0000000..cf51ab6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include "ipa_i.h"
+
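+/*
+ * Each bridge direction is built from a pair of SPS pipes: bridge[2 * dir]
+ * is the RX (from source) side and bridge[2 * dir + 1] is the TX (to
+ * destination) side.
+ */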
+enum ipa_bridge_id {
+ IPA_DL_FROM_A2,
+ IPA_DL_TO_IPA,
+ IPA_UL_FROM_IPA,
+ IPA_UL_TO_A2,
+ IPA_BRIDGE_ID_MAX
+};
+
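+/* usleep_range() bounds (in usec) and idle-cycle limit for polling mode */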
+static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
+static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
+static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+
+struct ipa_pkt_info {
+ void *buffer;
+ dma_addr_t dma_address;
+ uint32_t len;
+ struct list_head list_node;
+};
+
+struct ipa_bridge_pipe_context {
+ struct list_head head_desc_list;
+ struct sps_pipe *pipe;
+ struct sps_connect connection;
+ struct sps_mem_buffer desc_mem_buf;
+ struct sps_register_event register_event;
+ spinlock_t spinlock;
+ u32 len;
+ u32 free_len;
+ struct list_head free_desc_list;
+};
+
+static struct ipa_bridge_pipe_context bridge[IPA_BRIDGE_ID_MAX];
+
+static struct workqueue_struct *ipa_ul_workqueue;
+static struct workqueue_struct *ipa_dl_workqueue;
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir);
+
+static u32 alloc_cnt[IPA_DIR_MAX];
+
+static void ul_work_func(struct work_struct *work)
+{
+ ipa_do_bridge_work(IPA_UL);
+}
+
+static void dl_work_func(struct work_struct *work)
+{
+ ipa_do_bridge_work(IPA_DL);
+}
+
+static DECLARE_WORK(ul_work, ul_work_func);
+static DECLARE_WORK(dl_work, dl_work_func);
+
+static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
+{
+ int ret;
+ struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->register_event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ goto fail;
+ }
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir)
+{
+ int ret;
+ struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ goto fail;
+ }
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+static int queue_rx_single(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+ struct ipa_pkt_info *info;
+ int ret;
+
+ info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
+ if (!info) {
+ IPAERR("unable to alloc rx_pkt_info\n");
+ goto fail_pkt;
+ }
+
+ info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!info->buffer) {
+ IPAERR("unable to alloc rx_pkt_buffer\n");
+ goto fail_buffer;
+ }
+
+ info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (info->dma_address == 0 || info->dma_address == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)info->dma_address, info->buffer);
+ goto fail_dma;
+ }
+
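+	/* ~0 marks a buffer whose completion has not yet been consumed */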
+ info->len = ~0;
+
+ list_add_tail(&info->list_node, &sys_rx->head_desc_list);
+ ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
+ IPA_RX_SKB_SIZE, info,
+ SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ list_del(&info->list_node);
+ dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_dma;
+ }
+ sys_rx->len++;
+ return 0;
+
+fail_dma:
+ kfree(info->buffer);
+fail_buffer:
+ kfree(info);
+fail_pkt:
+ IPAERR("failed\n");
+ return -ENOMEM;
+}
+
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+ struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
+ struct ipa_pkt_info *tx_pkt;
+ struct ipa_pkt_info *rx_pkt;
+ struct ipa_pkt_info *tmp_pkt;
+ struct sps_iovec iov;
+ int ret;
+ int inactive_cycles = 0;
+
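+	/*
+	 * Poll both pipes for completed descriptors; once no work has been
+	 * seen for polling_inactivity[dir] cycles, fall back to interrupts.
+	 */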
+ while (1) {
+ ++inactive_cycles;
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_tx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ /* no-op */
+ } else {
+ inactive_cycles = 0;
+
+ tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_move_tail(&tx_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->len--;
+ sys_tx->free_len++;
+ tx_pkt->len = ~0;
+ }
+
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_rx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ /* no-op */
+ } else {
+ inactive_cycles = 0;
+
+ rx_pkt = list_first_entry(&sys_rx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_del(&rx_pkt->list_node);
+ sys_rx->len--;
+ rx_pkt->len = iov.size;
+
+retry_alloc_tx:
+ if (list_empty(&sys_tx->free_desc_list)) {
+ tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
+ GFP_KERNEL);
+ if (!tmp_pkt) {
+ pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+ __func__);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_alloc_tx;
+ }
+
+ tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp_pkt->buffer) {
+ pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+ __func__);
+ kfree(tmp_pkt);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_alloc_tx;
+ }
+
+ tmp_pkt->dma_address = dma_map_single(NULL,
+ tmp_pkt->buffer,
+ IPA_RX_SKB_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (tmp_pkt->dma_address == 0 ||
+ tmp_pkt->dma_address == ~0) {
+ pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+ __func__,
+ (void *)tmp_pkt->dma_address,
+ tmp_pkt->buffer);
+ kfree(tmp_pkt->buffer);
+ kfree(tmp_pkt);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_alloc_tx;
+ }
+
+ list_add_tail(&tmp_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->free_len++;
+ alloc_cnt[dir]++;
+
+ tmp_pkt->len = ~0;
+ }
+
+ tx_pkt = list_first_entry(&sys_tx->free_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_del(&tx_pkt->list_node);
+ sys_tx->free_len--;
+
+retry_add_rx:
+ list_add_tail(&tx_pkt->list_node,
+ &sys_rx->head_desc_list);
+ ret = sps_transfer_one(sys_rx->pipe,
+ tx_pkt->dma_address,
+ IPA_RX_SKB_SIZE,
+ tx_pkt,
+ SPS_IOVEC_FLAG_INT |
+ SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ list_del(&tx_pkt->list_node);
+ pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+ __func__, ret);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_add_rx;
+ }
+ sys_rx->len++;
+
+retry_add_tx:
+ list_add_tail(&rx_pkt->list_node,
+ &sys_tx->head_desc_list);
+ ret = sps_transfer_one(sys_tx->pipe,
+ rx_pkt->dma_address,
+ iov.size,
+ rx_pkt,
+ SPS_IOVEC_FLAG_INT |
+ SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+ __func__, dir);
+ list_del(&rx_pkt->list_node);
+ usleep_range(polling_min_sleep[dir],
+ polling_max_sleep[dir]);
+ goto retry_add_tx;
+ }
+ sys_tx->len++;
+ }
+
+ if (inactive_cycles >= polling_inactivity[dir]) {
+ ipa_switch_to_intr_mode(dir);
+ break;
+ }
+ }
+}
+
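The notify callbacks below queue ul_work and dl_work onto the workqueues allocated in ipa_bridge_init(). A minimal sketch, assuming those work items are declared near the top of this file, of how they would dispatch into ipa_do_bridge_work():

	static void ipa_do_ul_work(struct work_struct *work)
	{
		ipa_do_bridge_work(IPA_UL);
	}

	static void ipa_do_dl_work(struct work_struct *work)
	{
		ipa_do_bridge_work(IPA_DL);
	}

	static DECLARE_WORK(ul_work, ipa_do_ul_work);
	static DECLARE_WORK(dl_work, ipa_do_dl_work);
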
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ ipa_switch_to_poll_mode(IPA_UL);
+ queue_work(ipa_ul_workqueue, &ul_work);
+ break;
+ default:
+ IPAERR("recieved unexpected event id %d\n", notify->event_id);
+ }
+}
+
+static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+ struct ipa_ep_cfg_mode mode;
+ dma_addr_t dma_addr;
+ int ipa_ep_idx;
+ int ret;
+ int i;
+
+ if (dir == IPA_DL) {
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ IPA_CLIENT_A2_TETHERED_PROD);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ ret = -EINVAL;
+ goto tx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_DL_TO_IPA];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("tx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto tx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("tx get config failed %d\n", ret);
+ goto tx_get_config_failed;
+ }
+
+ sys->connection.source = SPS_DEV_HANDLE_MEM;
+ sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.destination = ipa_ctx->bam_handle;
+ sys->connection.dest_pipe_index = ipa_ep_idx;
+ sys->connection.mode = SPS_MODE_DEST;
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("tx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto tx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("tx connect error %d\n", ret);
+ goto tx_connect_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ ipa_ctx->ep[ipa_ep_idx].valid = 1;
+
+ mode.mode = IPA_DMA;
+ mode.dst = IPA_CLIENT_USB_CONS;
+ ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
+ if (ret < 0) {
+ IPAERR("DMA mode set error %d\n", ret);
+ goto tx_mode_set_failed;
+ }
+
+ return 0;
+
+tx_mode_set_failed:
+ sps_disconnect(sys->pipe);
+tx_connect_failed:
+ dma_free_coherent(NULL, sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+ return ret;
+ } else {
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ IPA_CLIENT_A2_TETHERED_CONS);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ ret = -EINVAL;
+ goto rx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_UL_FROM_IPA];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("rx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto rx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("rx get config failed %d\n", ret);
+ goto rx_get_config_failed;
+ }
+
+ sys->connection.source = ipa_ctx->bam_handle;
+ sys->connection.src_pipe_index = 7;
+ sys->connection.destination = SPS_DEV_HANDLE_MEM;
+ sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.mode = SPS_MODE_SRC;
+ sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("rx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto rx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("rx connect error %d\n", ret);
+ goto rx_connect_failed;
+ }
+
+ sys->register_event.options = SPS_O_EOT;
+ sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+ sys->register_event.xfer_done = NULL;
+ sys->register_event.callback = ipa_rx_notify;
+ sys->register_event.user = NULL;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret < 0) {
+ IPAERR("tx register event error %d\n", ret);
+ goto rx_event_reg_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+ ret = queue_rx_single(dir);
+ if (ret < 0)
+ IPAERR("queue fail %d %d\n", dir, i);
+ }
+
+ return 0;
+
+rx_event_reg_failed:
+ sps_disconnect(sys->pipe);
+rx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+ return ret;
+ }
+}
+
+static void bam_mux_rx_notify(struct sps_event_notify *notify)
+{
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ ipa_switch_to_poll_mode(IPA_DL);
+ queue_work(ipa_dl_workqueue, &dl_work);
+ break;
+ default:
+ IPAERR("recieved unexpected event id %d\n", notify->event_id);
+ }
+}
+
+static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+ struct a2_mux_pipe_connection pipe_conn = { 0, };
+ dma_addr_t dma_addr;
+ u32 a2_handle;
+ int ret;
+ int i;
+
+ if (dir == IPA_UL) {
+ ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
+ if (ret) {
+ IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
+ goto tx_alloc_endpoint_failed;
+ }
+
+ ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
+ if (ret) {
+ IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+ goto tx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_UL_TO_A2];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("tx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto tx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("tx get config failed %d\n", ret);
+ goto tx_get_config_failed;
+ }
+
+ sys->connection.source = SPS_DEV_HANDLE_MEM;
+ sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.destination = a2_handle;
+ sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
+ sys->connection.mode = SPS_MODE_DEST;
+ sys->connection.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("tx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto tx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("tx connect error %d\n", ret);
+ goto tx_connect_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ return 0;
+
+tx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+ return ret;
+ } else { /* dir == IPA_DL */
+
+ ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
+ if (ret) {
+ IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
+ goto rx_alloc_endpoint_failed;
+ }
+
+ ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
+ if (ret) {
+ IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+ goto rx_alloc_endpoint_failed;
+ }
+
+ sys = &bridge[IPA_DL_FROM_A2];
+ sys->pipe = sps_alloc_endpoint();
+ if (sys->pipe == NULL) {
+ IPAERR("rx alloc endpoint failed\n");
+ ret = -ENOMEM;
+ goto rx_alloc_endpoint_failed;
+ }
+ ret = sps_get_config(sys->pipe, &sys->connection);
+ if (ret) {
+ IPAERR("rx get config failed %d\n", ret);
+ goto rx_get_config_failed;
+ }
+
+ sys->connection.source = a2_handle;
+ sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
+ sys->connection.destination = SPS_DEV_HANDLE_MEM;
+ sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+ sys->connection.mode = SPS_MODE_SRC;
+ sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ SPS_O_ACK_TRANSFERS;
+ sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+ sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+ sys->desc_mem_buf.size,
+ &dma_addr,
+ 0);
+ if (sys->desc_mem_buf.base == NULL) {
+ IPAERR("rx memory alloc failed\n");
+ ret = -ENOMEM;
+ goto rx_get_config_failed;
+ }
+ sys->desc_mem_buf.phys_base = dma_addr;
+ memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+ sys->connection.desc = sys->desc_mem_buf;
+ sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+ ret = sps_connect(sys->pipe, &sys->connection);
+ if (ret < 0) {
+ IPAERR("rx connect error %d\n", ret);
+ goto rx_connect_failed;
+ }
+
+ sys->register_event.options = SPS_O_EOT;
+ sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+ sys->register_event.xfer_done = NULL;
+ sys->register_event.callback = bam_mux_rx_notify;
+ sys->register_event.user = NULL;
+ ret = sps_register_event(sys->pipe, &sys->register_event);
+ if (ret < 0) {
+ IPAERR("tx register event error %d\n", ret);
+ goto rx_event_reg_failed;
+ }
+
+ INIT_LIST_HEAD(&sys->head_desc_list);
+ INIT_LIST_HEAD(&sys->free_desc_list);
+ spin_lock_init(&sys->spinlock);
+
+ for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+ ret = queue_rx_single(dir);
+ if (ret < 0)
+ IPAERR("queue fail %d %d\n", dir, i);
+ }
+
+ return 0;
+
+rx_event_reg_failed:
+ sps_disconnect(sys->pipe);
+rx_connect_failed:
+ dma_free_coherent(NULL,
+ sys->desc_mem_buf.size,
+ sys->desc_mem_buf.base,
+ sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+ sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+ return ret;
+ }
+}
+
+/**
+ * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
+ * workqueues
+ *
+ * Return codes: 0: success, -ENOMEM: failure
+ */
+int ipa_bridge_init(void)
+{
+ int ret;
+
+ ipa_ul_workqueue = alloc_workqueue("ipa_ul",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ if (!ipa_ul_workqueue) {
+ IPAERR("ipa ul wq alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_ul;
+ }
+
+ ipa_dl_workqueue = alloc_workqueue("ipa_dl",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ if (!ipa_dl_workqueue) {
+ IPAERR("ipa dl wq alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_dl;
+ }
+
+ return 0;
+fail_dl:
+ destroy_workqueue(ipa_ul_workqueue);
+fail_ul:
+ return ret;
+}
+
+/**
+ * ipa_bridge_setup() - setup tethered SW bridge in specified direction
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: success
+ * various negative error codes on errors
+ */
+int ipa_bridge_setup(enum ipa_bridge_dir dir)
+{
+ int ret;
+
+ if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+ ipa_enable_clks();
+
+ if (setup_bridge_to_a2(dir)) {
+ IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
+ ret = -EINVAL;
+ goto bail_a2;
+ }
+
+ if (setup_bridge_to_ipa(dir)) {
+ IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
+ ret = -EINVAL;
+ goto bail_ipa;
+ }
+
+ return 0;
+
+bail_ipa:
+ if (dir == IPA_UL)
+ sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+ else
+ sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+bail_a2:
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+ return ret;
+}
+
+/**
+ * ipa_bridge_teardown() - teardown the tethered bridge in the specified dir
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: always
+ */
+int ipa_bridge_teardown(enum ipa_bridge_dir dir)
+{
+ struct ipa_bridge_pipe_context *sys;
+
+ if (dir == IPA_UL) {
+ sys = &bridge[IPA_UL_TO_A2];
+ sps_disconnect(sys->pipe);
+ sys = &bridge[IPA_UL_FROM_IPA];
+ sps_disconnect(sys->pipe);
+ } else {
+ sys = &bridge[IPA_DL_FROM_A2];
+ sps_disconnect(sys->pipe);
+ sys = &bridge[IPA_DL_TO_IPA];
+ sps_disconnect(sys->pipe);
+ }
+
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return 0;
+}
+
+/**
+ * ipa_bridge_cleanup() - de-initialize the tethered bridge
+ *
+ * Return codes:
+ * None
+ */
+void ipa_bridge_cleanup(void)
+{
+ destroy_workqueue(ipa_dl_workqueue);
+ destroy_workqueue(ipa_ul_workqueue);
+}
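
Taken together, the four exported entry points form the bridge lifecycle. A hedged sketch of a hypothetical caller (the function name and error labels are illustrative, not part of this patch):

	int tethered_bridge_start(void)
	{
		int ret;

		ret = ipa_bridge_init();
		if (ret)
			return ret;

		ret = ipa_bridge_setup(IPA_UL);
		if (ret)
			goto fail_ul;

		ret = ipa_bridge_setup(IPA_DL);
		if (ret)
			goto fail_dl;

		return 0;

	fail_dl:
		ipa_bridge_teardown(IPA_UL);
	fail_ul:
		ipa_bridge_cleanup();
		return ret;
	}
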
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
new file mode 100644
index 0000000..823b17d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+ struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+ int result = -EFAULT;
+
+ /* Default Config */
+ ep->ep_hdl = sps_alloc_endpoint();
+
+ if (ep->ep_hdl == NULL) {
+ IPAERR("SPS EP alloc failed EP.\n");
+ return -EFAULT;
+ }
+
+ result = sps_get_config(ep->ep_hdl,
+ &ep->connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ return -EFAULT;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(in->client)) {
+ ep->connect.mode = SPS_MODE_SRC;
+ ep->connect.destination =
+ in->client_bam_hdl;
+ ep->connect.source = ipa_ctx->bam_handle;
+ ep->connect.dest_pipe_index =
+ in->client_ep_idx;
+ ep->connect.src_pipe_index = ipa_ep_idx;
+ } else {
+ ep->connect.mode = SPS_MODE_DEST;
+ ep->connect.source = in->client_bam_hdl;
+ ep->connect.destination = ipa_ctx->bam_handle;
+ ep->connect.src_pipe_index = in->client_ep_idx;
+ ep->connect.dest_pipe_index = ipa_ep_idx;
+ }
+
+ return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+ struct sps_mem_buffer *mem_buff_ptr,
+ bool *fifo_in_pipe_mem_ptr,
+ u32 *fifo_pipe_mem_ofst_ptr,
+ u32 fifo_size, int ipa_ep_idx)
+{
+ dma_addr_t dma_addr;
+ u32 ofst;
+ int result = -EFAULT;
+
+ mem_buff_ptr->size = fifo_size;
+ if (in->pipe_mem_preferred) {
+ if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+ IPAERR("FIFO pipe mem alloc fail ep %u\n",
+ ipa_ep_idx);
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL,
+ mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ } else {
+ memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+ result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+ fifo_size, 1);
+ WARN_ON(result);
+ *fifo_in_pipe_mem_ptr = 1;
+ dma_addr = mem_buff_ptr->phys_base;
+ *fifo_pipe_mem_ofst_ptr = ofst;
+ }
+ } else {
+ mem_buff_ptr->base =
+ dma_alloc_coherent(NULL, mem_buff_ptr->size,
+ &dma_addr, GFP_KERNEL);
+ }
+ mem_buff_ptr->phys_base = dma_addr;
+ if (mem_buff_ptr->base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in: [in] input parameters from client
+ * @sps: [out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl: [out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. these peripherals are A2, USB and HSIC. this api
+ * expects caller to take responsibility to add any needed headers, routing
+ * and filtering tables and rules as needed.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int ipa_ep_idx_dst;
+ int result = -EFAULT;
+ struct ipa_ep_context *ep;
+
+ if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+ ipa_enable_clks();
+
+ if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+ in->client >= IPA_CLIENT_MAX ||
+ in->ipa_ep_cfg.mode.dst >= IPA_CLIENT_MAX ||
+ in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ result = -EINVAL;
+ goto fail;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to alloc EP.\n");
+ goto fail;
+ }
+
+ ep = &ipa_ctx->ep[ipa_ep_idx];
+
+ if (ep->valid) {
+ IPAERR("EP already allocated.\n");
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_PROD(in->client) &&
+ (in->ipa_ep_cfg.mode.mode == IPA_DMA)) {
+ ipa_ep_idx_dst = ipa_get_ep_mapping(ipa_ctx->mode,
+ in->ipa_ep_cfg.mode.dst);
+ if ((ipa_ep_idx_dst == -1) ||
+ (ipa_ctx->ep[ipa_ep_idx_dst].valid)) {
+ IPADBG("dst EP for IPA input pipe doesn't yet exist\n");
+ }
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+ ep->valid = 1;
+ ep->client = in->client;
+ ep->notify = in->notify;
+ ep->priv = in->priv;
+
+ if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to configure SPS.\n");
+ goto ipa_cfg_ep_fail;
+ }
+
+ if (in->desc.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+ &ep->desc_fifo_in_pipe_mem,
+ &ep->desc_fifo_pipe_mem_ofst,
+ in->desc_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DESC FIFO.\n");
+ goto desc_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DESC FIFO\n");
+ ep->connect.desc = in->desc;
+ ep->desc_fifo_client_allocated = 1;
+ }
+ IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
+ ep->connect.desc.size);
+
+ if (in->data.base == NULL) {
+ result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+ &ep->data_fifo_in_pipe_mem,
+ &ep->data_fifo_pipe_mem_ofst,
+ in->data_fifo_sz, ipa_ep_idx);
+ if (result) {
+ IPAERR("fail to allocate DATA FIFO.\n");
+ goto data_mem_alloc_fail;
+ }
+ } else {
+ IPADBG("client allocated DATA FIFO\n");
+ ep->connect.data = in->data;
+ ep->data_fifo_client_allocated = 1;
+ }
+ IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
+ ep->connect.data.size);
+
+ ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+ ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
+
+ result = sps_connect(ep->ep_hdl, &ep->connect);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto sps_connect_fail;
+ }
+
+ sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+ sps->ipa_ep_idx = ipa_ep_idx;
+ *clnt_hdl = ipa_ep_idx;
+ memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+ memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+ return 0;
+
+sps_connect_fail:
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+
+data_mem_alloc_fail:
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+
+desc_mem_alloc_fail:
+ sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail:
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_connect);
+
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ result = sps_disconnect(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS disconnect failed.\n");
+ return -EPERM;
+ }
+
+ if (!ep->desc_fifo_client_allocated &&
+ ep->connect.desc.base) {
+ if (!ep->desc_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.desc.size,
+ ep->connect.desc.base,
+ ep->connect.desc.phys_base);
+ else
+ ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+ ep->connect.desc.size);
+ }
+
+ if (!ep->data_fifo_client_allocated &&
+ ep->connect.data.base) {
+ if (!ep->data_fifo_in_pipe_mem)
+ dma_free_coherent(NULL,
+ ep->connect.data.size,
+ ep->connect.data.base,
+ ep->connect.data.phys_base);
+ else
+ ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+ ep->connect.data.size);
+ }
+
+ result = sps_free_endpoint(ep->ep_hdl);
+ if (result) {
+ IPAERR("SPS de-alloc EP failed.\n");
+ return -EPERM;
+ }
+
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+ if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+ ipa_disable_clks();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
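A hedged sketch of how a peripheral driver would use this connect/disconnect pair; the client enum value, BAM handle, endpoint index and FIFO sizes below are illustrative:

	static int periph_attach_example(u32 periph_bam_hdl, u32 periph_ep_idx)
	{
		struct ipa_connect_params in;
		struct ipa_sps_params sps;
		u32 clnt_hdl;
		int ret;

		memset(&in, 0, sizeof(in));
		in.client = IPA_CLIENT_USB_PROD; /* assumed enum value */
		in.client_bam_hdl = periph_bam_hdl;
		in.client_ep_idx = periph_ep_idx;
		in.desc_fifo_sz = 0x800;
		in.data_fifo_sz = 0x2000;

		ret = ipa_connect(&in, &sps, &clnt_hdl);
		if (ret)
			return ret;

		/* the peer side now calls sps_connect() using sps.ipa_bam_hdl
		 * and sps.ipa_ep_idx, with sps.desc/sps.data as its FIFOs */

		return ipa_disconnect(clnt_hdl);
	}
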
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
new file mode 100644
index 0000000..43b0178d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -0,0 +1,507 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "ipa_i.h"
+
+
+#define IPA_MAX_MSG_LEN 1024
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static s8 ep_reg_idx;
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_VERSION=0x%x\n"
+ "IPA_COMP_HW_VERSION=0x%x\n"
+ "IPA_ROUTE=0x%x\n"
+ "IPA_FILTER=0x%x\n"
+ "IPA_SHARED_MEM_SIZE=0x%x\n"
+ "IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
+ ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST),
+ ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST),
+ ipa_read_reg(ipa_ctx->mmio,
+ IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, buf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ if (option >= IPA_NUM_PIPES) {
+ IPAERR("bad pipe specified %u\n", option);
+ return count;
+ }
+
+ ep_reg_idx = option;
+
+ return count;
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int i;
+ int start_idx;
+ int end_idx;
+ int size = 0;
+ int ret;
+ loff_t pos;
+
+ /* negative ep_reg_idx means all registers */
+ if (ep_reg_idx < 0) {
+ start_idx = 0;
+ end_idx = IPA_NUM_PIPES;
+ } else {
+ start_idx = ep_reg_idx;
+ end_idx = start_idx + 1;
+ }
+ pos = *ppos;
+ for (i = start_idx; i < end_idx; i++) {
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_NAT_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_HDR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_MODE_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_AGGR_n_OFST(i)),
+ i, ipa_read_reg(ipa_ctx->mmio,
+ IPA_ENDP_INIT_ROUTE_n_OFST(i)));
+ *ppos = pos;
+ ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+ nbytes);
+ if (ret < 0)
+ return ret;
+
+ size += ret;
+ ubuf += nbytes;
+ count -= nbytes;
+ }
+
+ *ppos = pos + size;
+ return size;
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_hdr_entry *entry;
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "name:%s len=%d ref=%d partial=%d lcl=%d ofst=%u ",
+ entry->name,
+ entry->hdr_len, entry->ref_cnt,
+ entry->is_partial,
+ ipa_ctx->hdr_tbl_lcl,
+ entry->offset_entry->offset >> 2);
+ for (i = 0; i < entry->hdr_len; i++) {
+ scnprintf(dbg_buff + cnt + nbytes + i * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - i * 2,
+ "%02x", entry->hdr[i]);
+ }
+ scnprintf(dbg_buff + cnt + nbytes + entry->hdr_len * 2,
+ IPA_MAX_MSG_LEN - cnt - nbytes - entry->hdr_len * 2,
+ "\n");
+ cnt += nbytes + entry->hdr_len * 2 + 1;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static int ipa_attrib_dump(char *buff, size_t sz,
+ struct ipa_rule_attrib *attrib, enum ipa_ip_type ip)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ uint32_t addr[4];
+ uint32_t mask[4];
+ int i;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tos:%d ",
+ attrib->u.v4.tos);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "protocol:%d ",
+ attrib->u.v4.protocol);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.src_addr);
+ mask[0] = htonl(attrib->u.v4.src_addr_mask);
+ nbytes = scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI4 src_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.src_addr[i]);
+ mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "src_addr:%pI6 src_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ip == IPA_IP_v4) {
+ addr[0] = htonl(attrib->u.v4.dst_addr);
+ mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else if (ip == IPA_IP_v6) {
+ for (i = 0; i < 4; i++) {
+ addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+ mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+ }
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+ addr + 0, mask + 0);
+ cnt += nbytes;
+ } else {
+ WARN_ON(1);
+ }
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "src_port_range:%u %u ",
+ attrib->src_port_lo,
+ attrib->src_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt, "dst_port_range:%u %u ",
+ attrib->dst_port_lo,
+ attrib->dst_port_hi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "type:%d ",
+ attrib->type);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "code:%d ",
+ attrib->code);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "spi:%x ",
+ attrib->spi);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "src_port:%u ",
+ attrib->src_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "dst_port:%u ",
+ attrib->dst_port);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "tc:%d ",
+ attrib->u.v6.tc);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "flow_label:%x ",
+ attrib->u.v6.flow_label);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "next_hdr:%d ",
+ attrib->u.v6.next_hdr);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ nbytes =
+ scnprintf(buff + cnt, sz - cnt,
+ "metadata:%x metadata_mask:%x",
+ attrib->meta_data, attrib->meta_data_mask);
+ cnt += nbytes;
+ }
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ nbytes = scnprintf(buff + cnt, sz - cnt, "frg ");
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(buff + cnt, sz - cnt, "\n");
+ cnt += nbytes;
+
+ return cnt;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i = 0;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ u32 hdr_ofst;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (entry->hdr)
+ hdr_ofst = entry->hdr->offset_entry->offset;
+ else
+ hdr_ofst = 0;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "tbl_idx:%d tbl_name:%s tbl_ref:%u rule_idx:%d dst:%d ep:%d S:%u hdr_ofst[words]:%u attrib_mask:%08x ",
+ entry->tbl->idx, entry->tbl->name,
+ entry->tbl->ref_cnt, i, entry->rule.dst,
+ ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst),
+ !ipa_ctx->hdr_tbl_lcl,
+ hdr_ofst >> 2,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ int cnt = 0;
+ int i;
+ int j;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+ struct ipa_rt_tbl *rt_tbl;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt += ipa_attrib_dump(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib, ip);
+ i++;
+ }
+
+ for (j = 0; j < IPA_NUM_PIPES; j++) {
+ tbl = &ipa_ctx->flt_tbl[j][ip];
+ i = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+ j, i, entry->rule.action, rt_tbl->idx,
+ entry->rule.attrib.attrib_mask);
+ cnt += nbytes;
+ cnt +=
+ ipa_attrib_dump(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ &entry->rule.attrib,
+ ip);
+ i++;
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+ .read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+ .read = ipa_read_ep_reg,
+ .write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_hdr_ops = {
+ .read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+ .read = ipa_read_rt,
+ .open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_flt_ops = {
+ .read = ipa_read_flt,
+ .open = ipa_open_dbg,
+};
+
+void ipa_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+ &ipa_gen_reg_ops);
+ if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+ IPAERR("fail to create file for debug_fs gen_reg\n");
+ goto fail;
+ }
+
+ dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+ &ipa_ep_reg_ops);
+ if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+ IPAERR("fail to create file for debug_fs ep_reg\n");
+ goto fail;
+ }
+
+ dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+ &ipa_hdr_ops);
+ if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+ IPAERR("fail to create file for debug_fs hdr\n");
+ goto fail;
+ }
+
+ dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_rt_ops);
+ if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+ IPAERR("fail to create file for debug_fs ip4 rt\n");
+ goto fail;
+ }
+
+ dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_rt_ops);
+ if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+ IPAERR("fail to create file for debug_fs ip6:w" " rt\n");
+ goto fail;
+ }
+
+ dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+ (void *)IPA_IP_v4, &ipa_flt_ops);
+ if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+ IPAERR("fail to create file for debug_fs ip4 flt\n");
+ goto fail;
+ }
+
+ dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+ (void *)IPA_IP_v6, &ipa_flt_ops);
+ if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+ IPAERR("fail to create file for debug_fs ip6 flt\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+ if (IS_ERR(dent)) {
+ IPAERR("ipa_debugfs_remove: folder was not created.\n");
+ return;
+ }
+ debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+#endif
+
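These entries are read from userspace; a minimal sketch (assuming debugfs is mounted at /sys/kernel/debug) that dumps the gen_reg file created above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[1024];
		ssize_t n;
		int fd = open("/sys/kernel/debug/ipa/gen_reg", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
			buf[n] = '\0';
			fputs(buf, stdout); /* IPA_VERSION=..., IPA_ROUTE=... */
		}
		close(fd);
		return 0;
	}
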
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
new file mode 100644
index 0000000..c677a6e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+/**
+ * ipa_write_done() - this function will (eventually) be called when a Tx
+ * operation is complete
+ * @work: work_struct used by the work queue
+ */
+void ipa_write_done(struct work_struct *work)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+ unsigned long irq_flags;
+ struct ipa_mem_buffer mult = { 0 };
+ int i;
+ u16 cnt;
+
+ tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+ cnt = tx_pkt->cnt;
+ IPADBG("cnt=%d\n", cnt);
+
+ if (unlikely(cnt == 0))
+ WARN_ON(1);
+
+ if (cnt > 1 && cnt != 0xFFFF)
+ mult = tx_pkt->mult;
+
+ for (i = 0; i < cnt; i++) {
+ if (unlikely(tx_pkt == NULL))
+ WARN_ON(1);
+ spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
+ tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
+ struct ipa_tx_pkt_wrapper,
+ link);
+ if (unlikely(tx_pkt != tx_pkt_expected)) {
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
+ irq_flags);
+ WARN_ON(1);
+ }
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ tx_pkt->sys->len--;
+ spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ if (tx_pkt->callback)
+ tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+
+ if (mult.phys_base)
+ dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys: system pipe context
+ * @desc: descriptor to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ unsigned long irq_flags;
+ int result;
+ u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+ dma_addr_t dma_address;
+ u16 len;
+
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto fail_mem_alloc;
+ }
+
+ WARN_ON(desc->len > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the packet does not
+ * cross a 1KB boundary
+ */
+ tx_pkt->bounce = dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+ GFP_KERNEL, &dma_address);
+ if (!tx_pkt->bounce) {
+ dma_address = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary
+ ((u32)dma_address, (u32)dma_address + desc->len - 1,
+ 1024));
+ memcpy(tx_pkt->bounce, desc->pyld, desc->len);
+ }
+
+ if (!dma_address) {
+ IPAERR("failed to DMA wrap\n");
+ goto fail_dma_map;
+ }
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc->type;
+ tx_pkt->cnt = 1; /* only 1 desc in this "set" */
+
+ tx_pkt->mem.phys_base = dma_address;
+ tx_pkt->mem.base = desc->pyld;
+ tx_pkt->mem.size = desc->len;
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc->callback;
+ tx_pkt->user1 = desc->user1;
+ tx_pkt->user2 = desc->user2;
+
+ /*
+ * Special treatment for immediate commands, where the structure of the
+ * descriptor is different
+ */
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ sps_flags |= SPS_IOVEC_FLAG_IMME;
+ len = desc->opcode;
+ } else {
+ len = desc->len;
+ }
+
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+ }
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+ sps_flags);
+ if (result) {
+ IPAERR("sps_transfer_one failed rc=%d\n", result);
+ goto fail_sps_send;
+ }
+
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ return 0;
+
+fail_sps_send:
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ dma_address);
+fail_dma_map:
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+ return -EFAULT;
+}
+
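The bounce buffers above rely entirely on the pool never handing out a block that crosses 1KB. A sketch of how such a pool would be created at init time; the name, device, block size and alignment are illustrative assumptions, while the final dma_pool_create() argument is the boundary no allocation may straddle:

	ipa_ctx->one_kb_no_straddle_pool =
		dma_pool_create("ipa_1kb", NULL /* dev */,
				512 /* max payload, per the WARN_ON above */,
				4 /* alignment, assumed */,
				1024 /* 1KB no-straddle boundary */);
	if (!ipa_ctx->one_kb_no_straddle_pool)
		return -ENOMEM;
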
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+ struct ipa_tx_pkt_wrapper *next_pkt;
+ struct sps_transfer transfer = { 0 };
+ struct sps_iovec *iovec;
+ unsigned long irq_flags;
+ dma_addr_t dma_addr;
+ int i;
+ int j;
+ int result;
+ int fail_dma_wrap;
+ uint size = num_desc * sizeof(struct sps_iovec);
+
+ for (i = 0; i < num_desc; i++) {
+ fail_dma_wrap = 0;
+ tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+ GFP_KERNEL);
+ if (!tx_pkt) {
+ IPAERR("failed to alloc tx wrapper\n");
+ goto failure;
+ }
+ /*
+ * first desc of set is "special" as it holds the count and
+ * other info
+ */
+ if (i == 0) {
+ transfer.user = tx_pkt;
+ transfer.iovec =
+ dma_alloc_coherent(NULL, size, &dma_addr, 0);
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ if (!transfer.iovec) {
+ IPAERR("fail alloc DMA mem for sps xfr buff\n");
+ goto failure;
+ }
+
+ tx_pkt->mult.phys_base = dma_addr;
+ tx_pkt->mult.base = transfer.iovec;
+ tx_pkt->mult.size = size;
+ tx_pkt->cnt = num_desc;
+ }
+
+ iovec = &transfer.iovec[i];
+ iovec->flags = 0;
+
+ INIT_LIST_HEAD(&tx_pkt->link);
+ INIT_WORK(&tx_pkt->work, ipa_write_done);
+ tx_pkt->type = desc[i].type;
+
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ WARN_ON(tx_pkt->mem.size > 512);
+
+ /*
+ * Due to a HW limitation, we need to make sure that the
+ * packet does not cross a 1KB boundary
+ */
+ tx_pkt->bounce =
+ dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool, GFP_KERNEL,
+ &tx_pkt->mem.phys_base);
+ if (!tx_pkt->bounce) {
+ tx_pkt->mem.phys_base = 0;
+ } else {
+ WARN_ON(!ipa_straddle_boundary(
+ (u32)tx_pkt->mem.phys_base,
+ (u32)tx_pkt->mem.phys_base +
+ tx_pkt->mem.size - 1, 1024));
+ memcpy(tx_pkt->bounce, tx_pkt->mem.base,
+ tx_pkt->mem.size);
+ }
+
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("failed to alloc tx wrapper\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
+ tx_pkt->sys = sys;
+ tx_pkt->callback = desc[i].callback;
+ tx_pkt->user1 = desc[i].user1;
+ tx_pkt->user2 = desc[i].user2;
+
+ iovec->addr = tx_pkt->mem.phys_base;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->len++;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ /*
+ * Special treatment for immediate commands, where the structure
+ * of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ iovec->size = desc[i].opcode;
+ iovec->flags |= SPS_IOVEC_FLAG_IMME;
+ } else {
+ iovec->size = desc[i].len;
+ }
+
+ if (i == (num_desc - 1)) {
+ iovec->flags |= (SPS_IOVEC_FLAG_EOT |
+ SPS_IOVEC_FLAG_INT);
+ /* "mark" the last desc */
+ tx_pkt->cnt = 0xFFFF;
+ }
+ }
+
+ result = sps_transfer(sys->ep->ep_hdl, &transfer);
+ if (result) {
+ IPAERR("sps_transfer failed rc=%d\n", result);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+ tx_pkt = transfer.user;
+ for (j = 0; j < i; j++) {
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ next_pkt = list_next_entry(tx_pkt, link);
+ list_del(&tx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+ tx_pkt->mem.phys_base);
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ tx_pkt = next_pkt;
+ }
+ if (i < num_desc) {
+ /* last desc failed */
+ if (fail_dma_wrap)
+ kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ }
+ if (transfer.iovec_phys)
+ dma_free_coherent(NULL, size, transfer.iovec,
+ transfer.iovec_phys);
+
+ return -EFAULT;
+}
+
+/**
+ * ipa_cmd_ack - callback function which will be called by SPS driver after an
+ * immediate command is complete.
+ * @user1: pointer to the descriptor of the transfer
+ * @user2: unused
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_cmd_ack(void *user1, void *user2)
+{
+ struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+ if (!desc)
+ WARN_ON(1);
+ IPADBG("got ack for cmd=%d\n", desc->opcode);
+ complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc: number of descriptors within the descr struct
+ * @descr: descriptor structure
+ *
+ * The function blocks until the command is ACKed by the IPA HW; the caller
+ * must free any resources it allocated after the function returns
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+ struct ipa_desc *desc;
+
+ if (num_desc == 1) {
+ init_completion(&descr->xfer_done);
+
+ /* client should not set these */
+ if (descr->callback || descr->user1)
+ WARN_ON(1);
+
+ descr->callback = ipa_cmd_ack;
+ descr->user1 = descr;
+ if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
+ IPAERR("fail to send immediate command\n");
+ return -EFAULT;
+ }
+ wait_for_completion(&descr->xfer_done);
+ } else {
+ desc = &descr[num_desc - 1];
+ init_completion(&desc->xfer_done);
+
+ /* client should not set these */
+ if (desc->callback || desc->user1)
+ WARN_ON(1);
+
+ desc->callback = ipa_cmd_ack;
+ desc->user1 = desc;
+ if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
+ IPAERR("fail to send multiple immediate command set\n");
+ return -EFAULT;
+ }
+ wait_for_completion(&desc->xfer_done);
+ }
+
+ return 0;
+}
+
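A hedged sketch of issuing one immediate command through this helper; the opcode and payload are placeholders for a real command built elsewhere:

	static int send_imm_cmd_example(void *cmd_pyld, u16 pyld_len, u16 opcode)
	{
		struct ipa_desc desc;

		memset(&desc, 0, sizeof(desc));
		desc.type = IPA_IMM_CMD_DESC;
		desc.opcode = opcode;
		desc.pyld = cmd_pyld;
		desc.len = pyld_len;
		/* callback/user1 stay NULL; ipa_send_cmd() claims them */

		return ipa_send_cmd(1, &desc);
	}
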
+/**
+ * ipa_tx_notify() - Callback function which will be called by the SPS driver
+ * after a Tx operation is complete. Called in an interrupt context.
+ * @notify: SPS driver supplied notification struct
+ */
+static void ipa_tx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_tx_pkt_wrapper *tx_pkt;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ tx_pkt = notify->data.transfer.user;
+ queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
+ break;
+ default:
+ IPAERR("recieved unexpected event id %d\n", notify->event_id);
+ }
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ * - Disconnect the packet from the system pipe linked list
+ * - Unmap the packets skb, make it non DMAable
+ * - Free the packet from the cache
+ * - Prepare a proper skb
+ * - Call the endpoints notify function, passing the skb in the parameters
+ * - Replenish the rx cache
+ */
+void ipa_handle_rx_core(void)
+{
+ struct ipa_a5_mux_hdr *mux_hdr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct sk_buff *rx_skb;
+ struct sps_iovec iov;
+ unsigned long irq_flags;
+ u16 pull_len;
+ u16 padding;
+ int ret;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+ struct ipa_ep_context *ep;
+
+ do {
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ break;
+ }
+
+ /* Break the loop when there are no more packets to receive */
+ if (iov.addr == 0)
+ break;
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ if (list_empty(&sys->head_desc_list))
+ WARN_ON(1);
+ rx_pkt = list_first_entry(&sys->head_desc_list,
+ struct ipa_rx_pkt_wrapper, link);
+ if (!rx_pkt)
+ WARN_ON(1);
+ rx_pkt->len = iov.size;
+ sys->len--;
+ list_del(&rx_pkt->link);
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ IPADBG("--curr_cnt=%d\n", sys->len);
+
+ rx_skb = rx_pkt->skb;
+ dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+
+ /*
+ * make it look like a real skb, "data" was already set at
+ * alloc time; rx_pkt must stay alive until its len is consumed
+ */
+ rx_skb->tail = rx_skb->data + rx_pkt->len;
+ rx_skb->len = rx_pkt->len;
+ rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+ mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+ IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+ rx_skb->len, ntohs(mux_hdr->interface_id),
+ mux_hdr->src_pipe_index,
+ mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+ IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+ if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
+ IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
+ mux_hdr->src_pipe_index,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+ dev_kfree_skb_any(rx_skb);
+ ipa_replenish_rx_cache();
+ continue;
+ }
+
+ ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
+ pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+ /*
+ * IP packet starts on word boundary
+ * remove the MUX header and any padding and pass the frame to
+ * the client which registered a rx callback on the "src pipe"
+ */
+ padding = ep->cfg.hdr.hdr_len & 0x3;
+ if (padding)
+ pull_len += 4 - padding;
+
+ IPADBG("pulling %d bytes from skb\n", pull_len);
+ skb_pull(rx_skb, pull_len);
+ ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+ ipa_replenish_rx_cache();
+ } while (1);
+}
+
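For reference, a sketch of the A5 MUX header consumed above. The real declaration lives in ipa_i.h; the field widths here are inferred from the ntohs()/ntohl() accesses and the single-byte pipe/flags fields, so treat the exact layout as an assumption:

	struct ipa_a5_mux_hdr {
		u16 interface_id;	/* big-endian on the wire (ntohs above) */
		u8 src_pipe_index;	/* source IPA pipe, used for demux */
		u8 flags;
		u32 metadata;		/* big-endian on the wire (ntohl above) */
	};
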
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(void)
+{
+ int ret;
+ struct ipa_sys_context *sys;
+
+ IPADBG("Enter");
+ if (!ipa_ctx->curr_polling_state) {
+ IPAERR("already in intr mode\n");
+ return;
+ }
+
+ sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ sys->event.options = SPS_O_EOT;
+ ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+ if (ret) {
+ IPAERR("sps_register_event() failed %d\n", ret);
+ return;
+ }
+ sys->ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+ ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ ipa_handle_rx_core();
+ ipa_ctx->curr_polling_state = 0;
+}
+
+/**
+ * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
+ */
+static void ipa_rx_switch_to_poll_mode(void)
+{
+ int ret;
+ struct ipa_ep_context *ep;
+
+ IPADBG("Enter");
+ ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+ ret = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ return;
+ }
+ ep->connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ return;
+ }
+ ipa_ctx->curr_polling_state = 1;
+}
+
+/**
+ * ipa_rx_notify() - Callback function which is called by the SPS driver when
+ * a packet is received
+ * @notify: SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred to a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+
+ IPADBG("event %d notified\n", notify->event_id);
+
+ switch (notify->event_id) {
+ case SPS_EVENT_EOT:
+ if (!ipa_ctx->curr_polling_state) {
+ ipa_rx_switch_to_poll_mode();
+ rx_pkt = notify->data.transfer.user;
+ queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+ }
+ break;
+ default:
+ IPAERR("recieved unexpected event id %d\n", notify->event_id);
+ }
+}
+
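The work item queued above runs the polling loop outside interrupt context. A hedged sketch of the deferred handler, assuming rx_pkt->work was initialized against it when the wrapper was allocated:

	static void ipa_wq_handle_rx(struct work_struct *work)
	{
		/* drain whatever is already in the pipe ... */
		ipa_handle_rx_core();
		/*
		 * ... then re-arm the EOT interrupt; this helper polls the
		 * pipe once more internally to close the race with a packet
		 * arriving between the drain and the mode switch
		 */
		ipa_rx_switch_to_intr_mode();
	}
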
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in: [in] input needed to setup BAM pipe and config EP
+ * @clnt_hdl: [out] client handle
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+ int ipa_ep_idx;
+ int sys_idx = -1;
+ int result = -EFAULT;
+ dma_addr_t dma_addr;
+
+ if (sys_in == NULL || clnt_hdl == NULL ||
+ sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+ IPAERR("bad parm.\n");
+ result = -EINVAL;
+ goto fail_bad_param;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("Invalid client.\n");
+ goto fail_bad_param;
+ }
+
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
+ IPAERR("EP already allocated.\n");
+ goto fail_bad_param;
+ }
+
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+ ipa_ctx->ep[ipa_ep_idx].valid = 1;
+ ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
+
+ if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+ IPAERR("fail to configure EP.\n");
+ goto fail_sps_api;
+ }
+
+ /* Default Config */
+ ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
+
+ if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
+ IPAERR("SPS EP allocation failed.\n");
+ goto fail_sps_api;
+ }
+
+ result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->ep[ipa_ep_idx].connect);
+ if (result) {
+ IPAERR("fail to get config.\n");
+ goto fail_mem_alloc;
+ }
+
+ /* Specific Config */
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
+ ipa_ctx->ep[ipa_ep_idx].connect.destination =
+ SPS_DEV_HANDLE_MEM;
+ ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
+ ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
+ ipa_ctx->a5_pipe_index++;
+ ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
+ ipa_ctx->ep[ipa_ep_idx].connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+ if (ipa_ctx->polling_mode)
+ ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
+ } else {
+ ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
+ ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
+ ipa_ctx->ep[ipa_ep_idx].connect.destination =
+ ipa_ctx->bam_handle;
+ ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
+ ipa_ctx->a5_pipe_index++;
+ ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
+ ipa_ctx->ep[ipa_ep_idx].connect.options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT;
+ if (ipa_ctx->polling_mode)
+ ipa_ctx->ep[ipa_ep_idx].connect.options |=
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ }
+
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
+ dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+ &dma_addr, 0);
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
+ if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
+ IPAERR("fail to get DMA desc memory.\n");
+ goto fail_mem_alloc;
+ }
+
+ ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+ result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->ep[ipa_ep_idx].connect);
+ if (result) {
+ IPAERR("sps_connect fails.\n");
+ goto fail_sps_connect;
+ }
+
+ switch (ipa_ep_idx) {
+ case 1:
+ /* fall through */
+ case 2:
+ /* fall through */
+ case 3:
+ sys_idx = ipa_ep_idx;
+ break;
+ case 15:
+ sys_idx = IPA_A5_WLAN_AMPDU_OUT;
+ break;
+ default:
+ IPAERR("Invalid EP index.\n");
+ result = -EFAULT;
+ goto fail_register_event;
+ }
+
+ if (!ipa_ctx->polling_mode) {
+ if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ result =
+ sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("rx register event error %d\n", result);
+ goto fail_register_event;
+ }
+ } else {
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ result =
+ sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("tx register event error %d\n", result);
+ goto fail_register_event;
+ }
+ }
+ }
+
+ return 0;
+
+fail_register_event:
+ sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_connect:
+ dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
+ ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
+fail_mem_alloc:
+ sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_api:
+ memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_bad_param:
+ return result;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
+ dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
+ ipa_ctx->ep[clnt_hdl].connect.desc.base,
+ ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
+ sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
+ memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
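
As an illustration of the setup/teardown pair above, here is a minimal, hypothetical usage sketch (editorial, not part of the patch). The client enum value IPA_CLIENT_A5_LAN_WAN_PROD is an assumption, mirroring the IPA_A5_LAN_WAN_OUT pipe named in ipa_i.h; 0x800 matches IPA_SYS_DESC_FIFO_SZ defined there.

	/* Hedged sketch: bring up a system-BAM pipe, use it, tear it down. */
	static int example_sys_pipe(void)
	{
		struct ipa_sys_connect_params sys_in;
		u32 hdl;
		int ret;

		memset(&sys_in, 0, sizeof(sys_in));
		sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD; /* assumed enum value */
		sys_in.desc_fifo_sz = 0x800; /* IPA_SYS_DESC_FIFO_SZ */
		/* sys_in.ipa_ep_cfg left zeroed: default EP configuration */

		ret = ipa_setup_sys_pipe(&sys_in, &hdl);
		if (ret)
			return ret;

		/* ... exchange data on the pipe ... */

		return ipa_teardown_sys_pipe(hdl);
	}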
+
+/**
+ * ipa_tx_comp() - TX completion callback; calls the user-supplied callback to
+ * release the skb, or releases the skb itself if no callback was supplied
+ * @user1: [in] the skb to release
+ * @user2: [in] the index of the EP the skb was sent on
+ */
+static void ipa_tx_comp(void *user1, void *user2)
+{
+ struct sk_buff *skb = (struct sk_buff *)user1;
+ u32 ep_idx = (u32)user2;
+
+ IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+ if (ipa_ctx->ep[ep_idx].notify)
+ ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+ IPA_WRITE_DONE, (unsigned long)skb);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler. This is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, and for the regular HW data-path, for WLAN AMPDU traffic
+ * only. If dst is a "valid" CONS type, the SW data-path is used; if dst is the
+ * WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver frees the skb internally.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *meta)
+{
+ struct ipa_desc desc[2];
+ int ipa_ep_idx;
+ struct ipa_ip_packet_init *cmd;
+
+ memset(&desc, 0, 2 * sizeof(struct ipa_desc));
+
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
+ if (ipa_ep_idx == -1) {
+ IPAERR("dest EP does not exist.\n");
+ goto fail_gen;
+ }
+
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+ IPAERR("dest EP not valid.\n");
+ goto fail_gen;
+ }
+
+ if (IPA_CLIENT_IS_CONS(dst)) {
+ cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_mem_alloc;
+ }
+
+ cmd->destination_pipe_index = ipa_ep_idx;
+ if (meta && meta->mbim_stream_id_valid)
+ cmd->metadata = meta->mbim_stream_id;
+ desc[0].opcode = IPA_IP_PACKET_INIT;
+ desc[0].pyld = cmd;
+ desc[0].len = sizeof(struct ipa_ip_packet_init);
+ desc[0].type = IPA_IMM_CMD_DESC;
+ desc[1].pyld = skb->data;
+ desc[1].len = skb->len;
+ desc[1].type = IPA_DATA_DESC_SKB;
+ desc[1].callback = ipa_tx_comp;
+ desc[1].user1 = skb;
+ desc[1].user2 = (void *)ipa_ep_idx;
+
+ if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send;
+ }
+ } else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
+ desc[0].pyld = skb->data;
+ desc[0].len = skb->len;
+ desc[0].type = IPA_DATA_DESC_SKB;
+ desc[0].callback = ipa_tx_comp;
+ desc[0].user1 = skb;
+ desc[0].user2 = (void *)ipa_ep_idx;
+
+ if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
+ &desc[0])) {
+ IPAERR("fail to send skb\n");
+ goto fail_gen;
+ }
+ } else {
+ IPAERR("%d PROD is not supported.\n", dst);
+ goto fail_gen;
+ }
+
+ return 0;
+
+fail_send:
+ kfree(cmd);
+fail_mem_alloc:
+fail_gen:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
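
A hedged usage sketch of the tx handler (editorial, not part of the patch); IPA_CLIENT_USB_CONS is assumed to be one of the "valid" CONS types mentioned above, and the meta fields used are the two the function reads.

	/* Hedged sketch: send one skb down the SW data path. */
	static int example_tx(struct sk_buff *skb)
	{
		struct ipa_tx_meta meta;

		memset(&meta, 0, sizeof(meta));
		meta.mbim_stream_id_valid = false; /* no MBIM stream tagging */

		/* on success the driver (or the notify callback) frees the skb */
		if (ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, &meta)) {
			dev_kfree_skb_any(skb); /* on failure the caller frees it */
			return -EFAULT;
		}
		return 0;
	}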
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+void ipa_handle_rx(struct work_struct *work)
+{
+ ipa_handle_rx_core();
+ ipa_rx_switch_to_intr_mode();
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ * - Allocate a buffer in the cache
+ * - Initialize the packet's link
+ * - Initialize the packet's work struct
+ * - Allocate the packet's socket buffer (skb)
+ * - Fill the packet's skb with data
+ * - Make the packet DMA-able
+ * - Add the packet to the system pipe linked list
+ * - Initiate an SPS transfer so that the SPS driver will use this packet later.
+ */
+void ipa_replenish_rx_cache(void)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached;
+ unsigned long irq_flags;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ rx_len_cached = sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ /* true RX data path is not currently exercised so drop the ceil */
+ while (rx_len_cached < (IPA_RX_POOL_CEIL >> 3)) {
+ rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+ GFP_KERNEL);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+
+ rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+ if (rx_pkt->skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ goto fail_skb_alloc;
+ }
+ ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
+ rx_pkt->dma_address = dma_map_single(NULL, ptr,
+ IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->dma_address, ptr);
+ goto fail_dma_mapping;
+ }
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+ ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
+ IPA_RX_SKB_SIZE, rx_pkt,
+ SPS_IOVEC_FLAG_INT);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+
+ IPADBG("++curr_cnt=%d\n", sys->len);
+ }
+
+ return;
+
+fail_sps_transfer:
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_del(&rx_pkt->link);
+ --sys->len;
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+fail_dma_mapping:
+ dev_kfree_skb_any(rx_pkt->skb);
+fail_skb_alloc:
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+ return;
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ *
+ */
+void ipa_cleanup_rx(void)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_rx_pkt_wrapper *r;
+ unsigned long irq_flags;
+ struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->head_desc_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_pkt->skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
new file mode 100644
index 0000000..81f3a80
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE (4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3)
+#define IPA_FLT_BIT_MASK (0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller only wants to know the
+ * size of the rule as seen by HW, so no valid buffer was passed and a
+ * scratch buffer is used instead. With this scheme the rule is generated
+ * twice: once to learn its size (into the scratch buffer) and a second
+ * time to write it into the caller-supplied buffer of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_flt_entry *entry, u8 *buf)
+{
+ struct ipa_flt_rule_hw_hdr *hdr;
+ const struct ipa_flt_rule *rule =
+ (const struct ipa_flt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+ u8 *start;
+
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ if (buf == NULL)
+ buf = tmp;
+
+ start = buf;
+ hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+ hdr->u.hdr.action = entry->rule.action;
+ hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+ hdr->u.hdr.rsvd = 0;
+ buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule %x\n", en_rule);
+
+ hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR("hw_len differs b/w passes passed=%x calc=%x\n",
+ entry->hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ *
+ * Returns: the size of the HW table on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ u32 total_sz = 0;
+ u32 rule_set_sz;
+ int i;
+
+ *hdr_sz = 0;
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ rule_set_sz = 0;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW FLT rule size\n");
+ return -EPERM;
+ }
+ IPADBG("pipe %d len %d\n", i, entry->hw_len);
+ rule_set_sz += entry->hw_len;
+ }
+
+ if (rule_set_sz) {
+ tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+ /* this rule-set uses a word in header block */
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ if (!tbl->in_sys) {
+ /* add the terminator */
+ total_sz += (rule_set_sz +
+ IPA_FLT_TABLE_WORD_SIZE);
+ total_sz = (total_sz +
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+ }
+
+ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the filtering table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ u32 hdr_top = 0;
+ int i;
+ u32 hdr_sz;
+ u32 offset;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ struct ipa_mem_buffer flt_tbl_mem;
+ u8 *ftbl_membody;
+
+ mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+ mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+ if (mem->size == 0) {
+ IPAERR("flt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the flt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* write a dummy header to move cursor */
+ hdr = ipa_write_32(hdr_top, hdr);
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ hdr_top |= IPA_FLT_BIT_MASK;
+ if (!tbl->in_sys) {
+ offset = body - base;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(NULL, flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+ hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("failed to gen HW FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody = ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (!list_empty(&tbl->head_flt_rule_list)) {
+ /* pipe "i" is at bit "i+1" */
+ hdr_top |= (1 << (i + 1));
+ if (!tbl->in_sys) {
+ offset = body - base;
+ if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("ofst is not word multiple %d\n",
+ offset);
+ goto proc_err;
+ }
+ offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_FLT_BIT_MASK;
+ hdr = ipa_write_32(offset, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ body)) {
+ IPAERR("fail gen FLT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_FLT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the flt tbl */
+ flt_tbl_mem.size = tbl->sz;
+ flt_tbl_mem.base =
+ dma_alloc_coherent(NULL, flt_tbl_mem.size,
+ &flt_tbl_mem.phys_base,
+ GFP_KERNEL);
+ if (!flt_tbl_mem.base) {
+ IPAERR("fail alloc DMA buff size %d\n",
+ flt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(flt_tbl_mem.phys_base &
+ IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+ ftbl_membody = flt_tbl_mem.base;
+ memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+ hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+ /* generate the rule-set */
+ list_for_each_entry(entry,
+ &tbl->head_flt_rule_list,
+ link) {
+ if (ipa_generate_flt_hw_rule(ip, entry,
+ ftbl_membody)) {
+ IPAERR("fail gen FLT rule\n");
+ WARN_ON(1);
+ }
+ ftbl_membody += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ ftbl_membody =
+ ipa_write_32(0, ftbl_membody);
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = flt_tbl_mem;
+ }
+ }
+ }
+
+ /* now write the hdr_top */
+ ipa_write_32(hdr_top, base);
+
+ return 0;
+proc_err:
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+
+ return -EPERM;
+}
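
For reference, the table image the generator above builds can be summarized as follows (reconstructed from the code; editorial, not part of the patch). IPA_FLT_TABLE_WORD_SIZE is 4 bytes.

	word 0:    hdr_top bitmask; bit 0 = global rule-set, bit i+1 = pipe i
	word 1..n: one word per non-empty rule-set, in the same order:
	             local table  -> (offset of its rules from base) | IPA_FLT_BIT_MASK
	             system table -> physical address of its private DMA buffer
	body:      for local tables only: the rules back-to-back, a 32-bit zero
	           terminator, then padding up to the next word boundary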
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ int i;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+ }
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+
+ if (list_empty(&tbl->head_flt_rule_list)) {
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+ i, ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ memset(&tbl->curr_mem, 0,
+ sizeof(tbl->curr_mem));
+ }
+ }
+ }
+}
+
+static int __ipa_commit_flt(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_filter_init *v4;
+ struct ipa_ip_v6_filter_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = IPA_RAM_V4_FLT_SIZE;
+ size = sizeof(struct ipa_ip_v4_filter_init);
+ } else {
+ avail = IPA_RAM_V6_FLT_SIZE;
+ size = sizeof(struct ipa_ip_v6_filter_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_flt_hw_tbl(ip, mem)) {
+ IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_filter_init *)cmd;
+ desc.opcode = IPA_IP_V4_FILTER_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_RAM_V4_FLT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_filter_init *)cmd;
+ desc.opcode = IPA_IP_V6_FILTER_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_RAM_V6_FLT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_flt_tbls(ip);
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_entry *entry;
+ struct ipa_tree_node *node;
+
+ if (!rule->rt_tbl_hdl) {
+ IPAERR("flt rule does not point to valid RT tbl\n");
+ goto error;
+ }
+
+ if (ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rule->rt_tbl_hdl) == NULL) {
+ IPAERR("RT tbl not found\n");
+ goto error;
+ }
+
+ if (((struct ipa_rt_tbl *)rule->rt_tbl_hdl)->cookie != IPA_COOKIE) {
+ IPAERR("flt rule cookie is invalid\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc FLT rule object\n");
+ goto mem_alloc_fail;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->rule = *rule;
+ entry->cookie = IPA_COOKIE;
+ entry->rt_tbl = (struct ipa_rt_tbl *)rule->rt_tbl_hdl;
+ entry->tbl = tbl;
+ if (add_rear)
+ list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_flt_rule_list);
+ tbl->rule_cnt++;
+ entry->rt_tbl->ref_cnt++;
+ *rule_hdl = (u32)entry;
+ IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+ node->hdl = *rule_hdl;
+ if (ipa_insert(&ipa_ctx->flt_rule_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+
+ return 0;
+
+mem_alloc_fail:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+
+ return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+ struct ipa_flt_entry *entry = (struct ipa_flt_entry *)rule_hdl;
+ struct ipa_tree_node *node;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, rule_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+
+ return -EPERM;
+ }
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+ const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ IPADBG("add global flt rule ip=%d\n", ip);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+ const struct ipa_flt_rule *rule, u8 add_rear,
+ u32 *rule_hdl)
+{
+ struct ipa_flt_tbl *tbl;
+ int ipa_ep_idx;
+
+ if (ip >= IPA_IP_MAX || rule == NULL || rule_hdl == NULL ||
+ ep >= IPA_CLIENT_MAX) {
+ IPAERR("bad parms\n");
+
+ return -EINVAL;
+ }
+ ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
+ if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
+ ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+ IPAERR("bad parms\n");
+
+ return -EINVAL;
+ }
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+ IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ int i;
+ int result;
+
+ if (rules == NULL || rules->num_rules == 0 ||
+ rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (rules->global)
+ result = __ipa_add_global_flt_rule(rules->ip,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ else
+ result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].flt_rule_hdl);
+ if (result) {
+ IPAERR("failed to add flt rule %d\n", i);
+ rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (__ipa_commit_flt(rules->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
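
A hedged sketch of driving this API from client code (editorial, not part of the patch). The element type struct ipa_flt_rule_add for the rules[] flexible array is an assumption inferred from the fields the loop above references; rt_tbl_hdl must come from an earlier routing-table lookup.

	/* Hedged sketch: add one per-EP IPv4 filter rule and commit it. */
	static int example_add_flt(enum ipa_client_type ep, u32 rt_tbl_hdl)
	{
		struct ipa_ioc_add_flt_rule *req;
		int ret;

		req = kzalloc(sizeof(*req) + sizeof(struct ipa_flt_rule_add),
			      GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->ip = IPA_IP_v4;
		req->ep = ep;		/* global left 0: per-EP table */
		req->num_rules = 1;
		req->commit = 1;
		req->rules[0].at_rear = 1;
		req->rules[0].rule.rt_tbl_hdl = rt_tbl_hdl;

		ret = ipa_add_flt_rule(req);
		if (!ret && req->rules[0].status)
			ret = -EPERM;	/* the one rule failed to add */
		kfree(req);
		return ret;
	}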
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls: [inout] set of handles of filtering rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ int i;
+ int result;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+ hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (__ipa_commit_flt(hdls->ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ int result;
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (__ipa_commit_flt(ip)) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip: [in] the family of filtering tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ struct ipa_flt_tbl *tbl;
+ struct ipa_flt_entry *entry;
+ struct ipa_flt_entry *next;
+ struct ipa_tree_node *node;
+ int i;
+
+ tbl = &ipa_ctx->glob_flt_tbl[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset flt ip=%d\n", ip);
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+		node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, (u32)entry);
+		if (node == NULL) {
+			WARN_ON(1);
+			continue;
+		}
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+
+ for (i = 0; i < IPA_NUM_PIPES; i++) {
+ tbl = &ipa_ctx->flt_tbl[i][ip];
+ list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+ link) {
+			node = ipa_search(&ipa_ctx->flt_rule_hdl_tree,
+					(u32)entry);
+			if (node == NULL) {
+				WARN_ON(1);
+				continue;
+			}
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
new file mode 100644
index 0000000..4b9a500
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 32, 64 };
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem: [out] buffer to put the header table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+ struct ipa_hdr_entry *entry;
+
+ mem->size = ipa_ctx->hdr_tbl.end;
+
+ if (mem->size == 0) {
+ IPAERR("hdr tbl empty\n");
+ return -EPERM;
+ }
+ IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ memset(mem->base, 0, mem->size);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+ entry->offset_entry->offset);
+ memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+ entry->hdr_len);
+ }
+
+ return 0;
+}
+
+/*
+ * __ipa_commit_hdr() - commit the header table to IPA HW
+ * This function must be called with ipa_ctx->lock held.
+ */
+static int __ipa_commit_hdr(void)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ struct ipa_hdr_init_local *cmd;
+ u16 len;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+	/* the immediate command param size is the same for local and system */
+ len = sizeof(struct ipa_hdr_init_local);
+
+ /*
+ * we can use init_local ptr for init_system due to layout of the
+ * struct
+ */
+ cmd = kmalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_hdr_hw_tbl(mem)) {
+ IPAERR("fail to generate HDR HW TBL\n");
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+ IPA_RAM_HDR_SIZE);
+ goto fail_hw_tbl_gen;
+ }
+
+ cmd->hdr_table_addr = mem->phys_base;
+ if (ipa_ctx->hdr_tbl_lcl) {
+ cmd->size_hdr_table = mem->size;
+ cmd->hdr_addr = IPA_RAM_HDR_OFST;
+ desc.opcode = IPA_HDR_INIT_LOCAL;
+ } else {
+ desc.opcode = IPA_HDR_INIT_SYSTEM;
+ }
+ desc.pyld = cmd;
+ desc.len = sizeof(struct ipa_hdr_init_local);
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ if (ipa_ctx->hdr_tbl_lcl) {
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ } else {
+ if (ipa_ctx->hdr_mem.phys_base) {
+ dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+ ipa_ctx->hdr_mem.base,
+ ipa_ctx->hdr_mem.phys_base);
+ }
+ ipa_ctx->hdr_mem = *mem;
+ }
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+
+ return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_offset_entry *offset;
+ struct ipa_tree_node *node;
+ u32 bin;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+ if (hdr->hdr_len == 0) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc hdr object\n");
+ goto hdr_alloc_fail;
+ }
+
+ INIT_LIST_HEAD(&entry->link);
+
+ memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+ entry->hdr_len = hdr->hdr_len;
+ strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+ entry->is_partial = hdr->is_partial;
+ entry->cookie = IPA_COOKIE;
+
+ if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+ bin = IPA_HDR_BIN0;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+ bin = IPA_HDR_BIN1;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+ bin = IPA_HDR_BIN2;
+ else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+ bin = IPA_HDR_BIN3;
+ else {
+ IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ goto bad_hdr_len;
+ }
+
+ if (list_empty(&htbl->head_free_offset_list[bin])) {
+ offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+ GFP_KERNEL);
+ if (!offset) {
+ IPAERR("failed to alloc hdr offset object\n");
+ goto ofst_alloc_fail;
+ }
+ INIT_LIST_HEAD(&offset->link);
+ /*
+ * for a first item grow, set the bin and offset which are set
+ * in stone
+ */
+ offset->offset = htbl->end;
+ offset->bin = bin;
+ htbl->end += ipa_hdr_bin_sz[bin];
+ list_add(&offset->link,
+ &htbl->head_offset_list[bin]);
+ } else {
+ /* get the first free slot */
+ offset =
+ list_first_entry(&htbl->head_free_offset_list[bin],
+ struct ipa_hdr_offset_entry, link);
+ list_move(&offset->link, &htbl->head_offset_list[bin]);
+ }
+
+ entry->offset_entry = offset;
+ list_add(&entry->link, &htbl->head_hdr_entry_list);
+ htbl->hdr_cnt++;
+ IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
+ htbl->hdr_cnt, offset->offset);
+
+ hdr->hdr_hdl = (u32) entry;
+ node->hdl = hdr->hdr_hdl;
+ if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+
+ return 0;
+
+ofst_alloc_fail:
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
+bad_hdr_len:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+hdr_alloc_fail:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return -EPERM;
+}
+
+static int __ipa_del_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+ struct ipa_tree_node *node;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+ if (!entry || (entry->cookie != IPA_COOKIE) || (entry->ref_cnt != 0)) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+ htbl->hdr_cnt, entry->offset_entry->offset);
+
+ /* move the offset entry to appropriate free list */
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[entry->offset_entry->bin]);
+ list_del(&entry->link);
+ htbl->hdr_cnt--;
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs: [inout] set of headers to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdrs == NULL || hdrs->num_hdrs == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdrs->num_hdrs; i++) {
+ if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ IPAERR("failed to add hdr %d\n", i);
+ hdrs->hdr[i].status = -1;
+ } else {
+ hdrs->hdr[i].status = 0;
+ }
+ }
+
+ if (hdrs->commit) {
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
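
A hedged sketch of adding a header through this API (editorial, not part of the patch); struct ipa_hdr_add is the element type __ipa_add_hdr() takes above, hdr[] is assumed to be a flexible array so the allocation adds room for one element, and the 14-byte Ethernet-sized header is just an example.

	/* Hedged sketch: add one 14-byte header and commit it. */
	static int example_add_hdr(const u8 *hdr_bytes, u32 *hdr_hdl)
	{
		struct ipa_ioc_add_hdr *req;
		int ret;

		req = kzalloc(sizeof(*req) + sizeof(struct ipa_hdr_add),
			      GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->num_hdrs = 1;
		req->commit = 1;
		strlcpy(req->hdr[0].name, "example_eth_hdr", IPA_RESOURCE_NAME_MAX);
		memcpy(req->hdr[0].hdr, hdr_bytes, 14);
		req->hdr[0].hdr_len = 14;
		req->hdr[0].is_partial = 0;

		ret = ipa_add_hdr(req);
		if (!ret && req->hdr[0].status)
			ret = -EPERM;	/* the one header failed to add */
		if (!ret)
			*hdr_hdl = req->hdr[0].hdr_hdl; /* handle for later use */
		kfree(req);
		return ret;
	}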
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
+ * to IPA HW
+ * @hdls: [inout] set of headers to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ int i;
+ int result = -EFAULT;
+
+ if (hdls == NULL || hdls->num_hdls == 0) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del hdr %i\n", i);
+ hdls->hdl[i].status = -1;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit) {
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_dump_hdr() - prints all the headers in the header table in SW
+ *
+ * Note: Should not be called from atomic context
+ */
+void ipa_dump_hdr(void)
+{
+ struct ipa_hdr_entry *entry;
+
+ IPADBG("START\n");
+ mutex_lock(&ipa_ctx->lock);
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
+ entry->offset_entry->offset,
+ entry->offset_entry->bin);
+ }
+ mutex_unlock(&ipa_ctx->lock);
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+ int result = -EFAULT;
+
+ /*
+ * issue a commit on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa_commit_rt(IPA_IP_v4))
+ return -EPERM;
+ if (ipa_commit_rt(IPA_IP_v6))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (__ipa_commit_hdr()) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+ struct ipa_hdr_entry *entry;
+ struct ipa_hdr_entry *next;
+ struct ipa_hdr_offset_entry *off_entry;
+ struct ipa_hdr_offset_entry *off_next;
+ struct ipa_tree_node *node;
+ int i;
+
+ /*
+ * issue a reset on the routing module since routing rules point to
+ * header table entries
+ */
+ if (ipa_reset_rt(IPA_IP_v4))
+ IPAERR("fail to reset v4 rt\n");
+ if (ipa_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset hdr\n");
+ list_for_each_entry_safe(entry, next,
+ &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+ /* do not remove the default exception header */
+ if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX))
+ continue;
+
+		node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
+		if (node == NULL) {
+			WARN_ON(1);
+			continue;
+		}
+ list_del(&entry->link);
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ }
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_offset_list[i],
+ link) {
+
+ /*
+ * do not remove the default exception header which is
+ * at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
+
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_free_offset_list[i],
+ link) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ }
+ }
+	/* only the default exception header (size 8) remains */
+ ipa_ctx->hdr_tbl.end = 8;
+ ipa_ctx->hdr_tbl.hdr_cnt = 1;
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+ struct ipa_hdr_entry *entry;
+
+ list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+ link) {
+ if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup: [inout] header to lookup and its handle
+ *
+ * Look up the specified header resource and return its handle if it exists.
+ * If the lookup succeeds, the header entry ref cnt is increased.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -1;
+
+ if (lookup == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(lookup->name);
+ if (entry) {
+ entry->ref_cnt++;
+ lookup->hdl = (uint32_t) entry;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl: [in] the header handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+ struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+ struct ipa_tree_node *node;
+ int result = -EFAULT;
+
+ if (entry == NULL || entry->cookie != IPA_COOKIE ||
+ entry->ref_cnt == 0) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0) {
+ if (__ipa_del_hdr(hdr_hdl)) {
+ IPAERR("fail to del hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ /* commit for put */
+ if (__ipa_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ result = -EFAULT;
+ goto bail;
+ }
+ }
+ result = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
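
The get/put pair above follows the usual take/release discipline; a hedged sketch (editorial, not part of the patch), looking up the default exception header whose name IPA_DFLT_HDR_NAME defines in ipa_i.h.

	/* Hedged sketch: take a reference on a header, use it, release it. */
	static int example_hdr_ref(void)
	{
		struct ipa_ioc_get_hdr lookup;
		int ret;

		memset(&lookup, 0, sizeof(lookup));
		strlcpy(lookup.name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);

		ret = ipa_get_hdr(&lookup);	/* ref_cnt++ on success */
		if (ret)
			return ret;

		/* ... use lookup.hdl, e.g. when building a routing rule ... */

		return ipa_put_hdr(lookup.hdl);	/* ref_cnt--, may free + commit */
	}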
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy: [inout] header to lookup and its copy
+ *
+ * Look up the specified header resource and return a copy of it (along with
+ * its attributes) if it exists; this is typically called for partial headers
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ struct ipa_hdr_entry *entry;
+ int result = -EFAULT;
+
+ if (copy == NULL) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_find_hdr(copy->name);
+ if (entry) {
+ memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+ copy->hdr_len = entry->hdr_len;
+ copy->is_partial = entry->is_partial;
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
diff --git a/drivers/platform/msm/ipa/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_hw_defs.h
new file mode 100644
index 0000000..3131a84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hw_defs.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT (1)
+#define IPA_PPP_FRM_INIT (2)
+#define IPA_IP_V4_FILTER_INIT (3)
+#define IPA_IP_V6_FILTER_INIT (4)
+#define IPA_IP_V4_NAT_INIT (5)
+#define IPA_IP_V6_NAT_INIT (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL (9)
+#define IPA_HDR_INIT_SYSTEM (10)
+#define IPA_DECIPH_SETUP (11)
+#define IPA_INSERT_NAT_RULE (12)
+#define IPA_DELETE_NAT_RULE (13)
+#define IPA_NAT_DMA (14)
+#define IPA_IP_PACKET_TAG (15)
+#define IPA_IP_PACKET_INIT (16)
+
+#define IPA_INTERFACE_ID_EXCEPTION (0)
+#define IPA_INTERFACE_ID_A2_WWAN (0x10)
+#define IPA_INTERFACE_ID_HSUSB_RMNET1 (0x21)
+#define IPA_INTERFACE_ID_HSUSB_RMNET2 (0x22)
+#define IPA_INTERFACE_ID_HSUSB_RMNET3 (0x23)
+#define IPA_INTERFACE_ID_HSIC_WLAN_WAN (0x31)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN1 (0x32)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN2 (0x33)
+#define IPA_INTERFACE_ID_HSIC_RMNET1 (0x41)
+#define IPA_INTERFACE_ID_HSIC_RMNET2 (0x42)
+#define IPA_INTERFACE_ID_HSIC_RMNET3 (0x43)
+#define IPA_INTERFACE_ID_HSIC_RMNET4 (0x44)
+#define IPA_INTERFACE_ID_HSIC_RMNET5 (0x45)
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @rsvd: reserved
+ */
+struct ipa_flt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 action:5;
+ u32 rt_tbl_idx:5;
+ u32 rsvd:6;
+ } hdr;
+ } u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ */
+struct ipa_rt_rule_hw_hdr {
+ union {
+ u32 word;
+ struct {
+ u32 en_rule:16;
+ u32 pipe_dest_idx:5;
+ u32 system:1;
+ u32 hdr_offset:10;
+ } hdr;
+ } u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+ u64 ipv4_rules_addr:32;
+ u64 size_ipv4_rules:12;
+ u64 ipv4_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+ u64 ipv6_rules_addr:32;
+ u64 size_ipv6_rules:16;
+ u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_addr: address of header table
+ * @size_hdr_table: size of the above
+ * @hdr_addr: header address
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+ u64 hdr_table_addr:32;
+ u64 size_hdr_table:12;
+ u64 hdr_addr:16;
+ u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: address of header table
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+ u64 hdr_table_addr:32;
+ u64 rsvd:32;
+};
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(0)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(1)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(2)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(5)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+ u16 interface_id;
+ u8 src_pipe_index;
+ u8 flags;
+ u32 metadata;
+};
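
Because the MUX header arrives big-endian while A5 runs little-endian, A5-side parsing byte-swaps the multi-byte fields; a hedged sketch (editorial, not part of the patch; that the header sits at the front of the skb is an assumption).

	/* Hedged sketch: parse the A5 MUX header at the front of an skb. */
	static void example_parse_mux(struct sk_buff *skb)
	{
		struct ipa_a5_mux_hdr *mux = (struct ipa_a5_mux_hdr *)skb->data;
		u16 iface = ntohs(mux->interface_id);	/* BE -> host order */
		u32 meta = ntohl(mux->metadata);

		pr_debug("iface=0x%x pipe=%u flags=0x%x meta=0x%x\n",
			 iface, mux->src_pipe_index, mux->flags, meta);

		skb_pull(skb, sizeof(struct ipa_a5_mux_hdr));
	}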
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+ u64 destination_pipe_index:5;
+ u64 rsvd1:3;
+ u64 metadata:32;
+ u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: index in table
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+ u64 ipv4_rules_addr:32;
+ u64 ipv4_expansion_rules_addr:32;
+ u64 index_table_addr:32;
+ u64 index_table_expansion_addr:32;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
new file mode 100644
index 0000000..63ef5fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+
+#define DRV_NAME "ipa"
+#define IPA_COOKIE 0xfacefeed
+
+#define IPA_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ (0x800)
+
+#ifdef IPA_DEBUG
+#define IPADBG(fmt, args...) \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#else
+#define IPADBG(fmt, args...)
+#endif
+
+#define IPAERR(fmt, args...) \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPA_TOS_EQ BIT(0)
+#define IPA_PROTOCOL_EQ BIT(1)
+#define IPA_OFFSET_MEQ32_0 BIT(2)
+#define IPA_OFFSET_MEQ32_1 BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0 BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1 BIT(5)
+#define IPA_IHL_OFFSET_EQ_16 BIT(6)
+#define IPA_IHL_OFFSET_EQ_32 BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0 BIT(8)
+#define IPA_OFFSET_MEQ128_0 BIT(9)
+#define IPA_OFFSET_MEQ128_1 BIT(10)
+#define IPA_TC_EQ BIT(11)
+#define IPA_FL_EQ BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1 BIT(13)
+#define IPA_METADATA_COMPARE BIT(14)
+#define IPA_IPV4_IS_FRAG BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN_MAX 4
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_SKB_SIZE 2048
+
+#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
+
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE (128)
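
A few quick arithmetic examples of the helper macros above (illustration only, verified by hand):

	/* IPA_SETFIELD(5, 4, 0xF0)    == 0x50: (5 << 4) & 0xF0        */
	/* IPA_HW_TABLE_ALIGNMENT(1)   == 128:  rounds up to 128 bytes */
	/* IPA_HW_TABLE_ALIGNMENT(128) == 128:  already aligned        */
	/* IPA_HW_TABLE_ALIGNMENT(130) == 256:  next 128-byte boundary */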
+
+/**
+ * enum ipa_sys_pipe - 5 A5-IPA pipes
+ *
+ * 5 A5-IPA pipes (all system mode)
+ */
+enum ipa_sys_pipe {
+ IPA_A5_UNUSED,
+ IPA_A5_CMD,
+ IPA_A5_LAN_WAN_OUT,
+ IPA_A5_LAN_WAN_IN,
+ IPA_A5_WLAN_AMPDU_OUT,
+ IPA_A5_SYS_MAX
+};
+
+/**
+ * enum ipa_operating_mode - IPA operating mode
+ *
+ * IPA operating mode
+ */
+enum ipa_operating_mode {
+ IPA_MODE_USB_DONGLE,
+ IPA_MODE_MSM,
+ IPA_MODE_EXT_APPS,
+ IPA_MODE_MOBILE_AP_WAN,
+ IPA_MODE_MOBILE_AP_WLAN,
+ IPA_MODE_MOBILE_AP_ETH,
+ IPA_MODE_MAX
+};
+
+/**
+ * enum ipa_bridge_dir - direction of the bridge from air interface perspective
+ *
+ * IPA bridge direction
+ */
+enum ipa_bridge_dir {
+ IPA_DL,
+ IPA_UL,
+ IPA_DIR_MAX
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+ void *base;
+ dma_addr_t phys_base;
+ u32 size;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+ struct list_head link;
+ struct ipa_flt_rule rule;
+ u32 cookie;
+ struct ipa_flt_tbl *tbl;
+ struct ipa_rt_tbl *rt_tbl;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing table block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ */
+struct ipa_rt_tbl {
+ struct list_head link;
+ struct list_head head_rt_rule_list;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u32 idx;
+ u32 rule_cnt;
+ u32 ref_cnt;
+ struct ipa_rt_tbl_set *set;
+ u32 cookie;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @is_partial: flag indicating if header table entry is partial
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of this header entry
+ */
+struct ipa_hdr_entry {
+ struct list_head link;
+ u8 hdr[IPA_HDR_MAX_SIZE];
+ u32 hdr_len;
+ char name[IPA_RESOURCE_NAME_MAX];
+ u8 is_partial;
+ struct ipa_hdr_offset_entry *offset_entry;
+ u32 cookie;
+ u32 ref_cnt;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+ struct list_head link;
+ u32 offset;
+ u32 bin;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa_hdr_tbl {
+ struct list_head head_hdr_entry_list;
+ struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+ struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+ u32 hdr_cnt;
+ u32 end;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @curr_mem: current filter table block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+ struct list_head head_flt_rule_list;
+ u32 rule_cnt;
+ bool in_sys;
+ u32 sz;
+ struct ipa_mem_buffer curr_mem;
+ struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table entry
+ * @hw_len: the length of the rule in HW format
+ */
+struct ipa_rt_entry {
+ struct list_head link;
+ struct ipa_rt_rule rule;
+ u32 cookie;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_hdr_entry *hdr;
+ u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+ struct list_head head_rt_tbl_list;
+ u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_tree_node - handle database entry
+ * @node: RB node
+ * @hdl: handle
+ */
+struct ipa_tree_node {
+ struct rb_node node;
+ u32 hdl;
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information
+ * @notify: user provided CB for EP events notification
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ */
+struct ipa_ep_context {
+ int valid;
+ enum ipa_client_type client;
+ struct sps_pipe *ep_hdl;
+ struct ipa_ep_cfg cfg;
+ u32 dst_pipe_index;
+ u32 rt_tbl_idx;
+ struct sps_connect connect;
+ void *priv;
+ void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data);
+ bool desc_fifo_in_pipe_mem;
+ bool data_fifo_in_pipe_mem;
+ u32 desc_fifo_pipe_mem_ofst;
+ u32 data_fifo_pipe_mem_ofst;
+ bool desc_fifo_client_allocated;
+ bool data_fifo_client_allocated;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ * @wait_desc_list: used to hold completed Tx packets
+ *
+ * IPA context specific to the system-bam pipes, a.k.a. LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+ struct list_head head_desc_list;
+ u32 len;
+ spinlock_t spinlock;
+ struct sps_register_event event;
+ struct ipa_ep_context *ep;
+ struct list_head wait_desc_list;
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but not CD
+ */
+enum ipa_desc_type {
+ IPA_DATA_DESC,
+ IPA_DATA_DESC_SKB,
+ IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: type of the descriptor (data / skb / immediate command)
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ *	>1 and <0xFFFF for first of a "multiple" transfer,
+ *	0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ */
+struct ipa_tx_pkt_wrapper {
+ enum ipa_desc_type type;
+ struct ipa_mem_buffer mem;
+ struct work_struct work;
+ struct list_head link;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct ipa_sys_context *sys;
+ struct ipa_mem_buffer mult;
+ u16 cnt;
+ void *bounce;
+};
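+
+/*
+ * Illustration (per the @cnt description above): a three-descriptor
+ * "multiple" transfer carries cnt = 3 on the first wrapper, 0 on the
+ * middle one and 0xFFFF on the last; a standalone transfer carries
+ * cnt = 1.
+ */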
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+ enum ipa_desc_type type;
+ void *pyld;
+ u16 len;
+ u16 opcode;
+ void (*callback)(void *user1, void *user2);
+ void *user1;
+ void *user2;
+ struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @work: work struct for current Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+ struct sk_buff *skb;
+ dma_addr_t dma_address;
+ struct work_struct work;
+ struct list_head link;
+ u16 len;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ */
+struct ipa_nat_mem {
+ struct class *class;
+ struct device *dev;
+ struct cdev cdev;
+ dev_t dev_num;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+ bool is_mapped;
+ bool is_sys_mem;
+ bool is_dev_init;
+ struct mutex lock;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @tree_node_cache: tree nodes cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @sys: IPA sys context for system-bam pipes
+ * @rx_wq: Rx packets work queue
+ * @tx_wq: Tx packets work queue
+ * @smem_sz: shared memory size
+ * @hdr_hdl_tree: header handles tree
+ * @rt_rule_hdl_tree: routing rule handles tree
+ * @rt_tbl_hdl_tree: routing table handles tree
+ * @flt_rule_hdl_tree: filtering rule handles tree
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @polling_mode: 1 - pure polling mode; 0 - interrupt+polling mode
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @curr_polling_state: current polling state
+ * @poll_work: polling work
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_mem: header memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @pipe_mem_pool: pipe memory pool
+ * @one_kb_no_straddle_pool: one kb no straddle pool
+ * @ipa_active_clients: number of currently active IPA clients
+ * @clnt_hdl_cmd: client handle of the A5 command pipe
+ * @clnt_hdl_data_in: client handle of the data in pipe
+ * @clnt_hdl_data_out: client handle of the data out pipe
+ * @a5_pipe_index: A5 (system) pipe index
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ u32 bam_handle;
+ struct ipa_ep_context ep[IPA_NUM_PIPES];
+ struct ipa_flt_tbl flt_tbl[IPA_NUM_PIPES][IPA_IP_MAX];
+ enum ipa_operating_mode mode;
+ void __iomem *mmio;
+ u32 ipa_wrapper_base;
+ struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+ struct ipa_hdr_tbl hdr_tbl;
+ struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+ struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+ struct kmem_cache *flt_rule_cache;
+ struct kmem_cache *rt_rule_cache;
+ struct kmem_cache *hdr_cache;
+ struct kmem_cache *hdr_offset_cache;
+ struct kmem_cache *rt_tbl_cache;
+ struct kmem_cache *tx_pkt_wrapper_cache;
+ struct kmem_cache *rx_pkt_wrapper_cache;
+ struct kmem_cache *tree_node_cache;
+ unsigned long rt_idx_bitmap[IPA_IP_MAX];
+ struct mutex lock;
+ struct ipa_sys_context sys[IPA_A5_SYS_MAX];
+ struct workqueue_struct *rx_wq;
+ struct workqueue_struct *tx_wq;
+ u16 smem_sz;
+ struct rb_root hdr_hdl_tree;
+ struct rb_root rt_rule_hdl_tree;
+ struct rb_root rt_tbl_hdl_tree;
+ struct rb_root flt_rule_hdl_tree;
+ struct ipa_nat_mem nat_mem;
+ u32 excp_hdr_hdl;
+ u32 dflt_v4_rt_rule_hdl;
+ u32 dflt_v6_rt_rule_hdl;
+ bool polling_mode;
+ uint aggregation_type;
+ uint aggregation_byte_limit;
+ uint aggregation_time_limit;
+ uint curr_polling_state;
+ struct delayed_work poll_work;
+ bool hdr_tbl_lcl;
+ struct ipa_mem_buffer hdr_mem;
+ bool ip4_rt_tbl_lcl;
+ bool ip6_rt_tbl_lcl;
+ bool ip4_flt_tbl_lcl;
+ bool ip6_flt_tbl_lcl;
+ struct ipa_mem_buffer empty_rt_tbl_mem;
+ struct gen_pool *pipe_mem_pool;
+ struct dma_pool *one_kb_no_straddle_pool;
+ atomic_t ipa_active_clients;
+ u32 clnt_hdl_cmd;
+ u32 clnt_hdl_data_in;
+ u32 clnt_hdl_data_out;
+ u8 a5_pipe_index;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ */
+struct ipa_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+ IPA_SPS_PIPE_MEM = 0,
+ IPA_PRIVATE_MEM = 1,
+ IPA_SYSTEM_MEM = 2,
+};
+
+/**
+ * enum a2_mux_pipe_direction - IPA-A2 pipe direction
+ */
+enum a2_mux_pipe_direction {
+ A2_TO_IPA = 0,
+ IPA_TO_A2 = 1
+};
+
+/**
+ * struct a2_mux_pipe_connection - A2 MUX pipe connection
+ * @src_phy_addr: source physical address
+ * @src_pipe_index: source pipe index
+ * @dst_phy_addr: destination physical address
+ * @dst_pipe_index: destination pipe index
+ * @mem_type: pipe memory type
+ * @data_fifo_base_offset: data FIFO base offset
+ * @data_fifo_size: data FIFO size
+ * @desc_fifo_base_offset: descriptors FIFO base offset
+ * @desc_fifo_size: descriptors FIFO size
+ */
+struct a2_mux_pipe_connection {
+ int src_phy_addr;
+ int src_pipe_index;
+ int dst_phy_addr;
+ int dst_pipe_index;
+ enum ipa_pipe_mem_type mem_type;
+ int data_fifo_base_offset;
+ int data_fifo_size;
+ int desc_fifo_base_offset;
+ int desc_fifo_size;
+};
+
+extern struct ipa_context *ipa_ctx;
+
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+ struct a2_mux_pipe_connection *pipe_connect);
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle);
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc);
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc);
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client);
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ u8 **buf,
+ u16 *en_rule);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+void ipa_dump(void);
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem);
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+/*
+ * The functions below read from/write to IPA local memory, a.k.a. device
+ * memory. The order of the arguments is deliberately different from that
+ * of the ipa_write* functions, which operate on system memory.
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram);
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram);
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram);
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count);
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count);
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count);
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count);
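+
+/*
+ * Usage sketch (illustrative): the device accessors address IPA SRAM by
+ * a u16 offset, while the system-memory helpers above take a destination
+ * pointer and return it advanced past what was written:
+ *
+ *	ipa_write_dev_32(val, sram_ofst);
+ *	buf = ipa_write_32(val, buf);
+ */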
+
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data);
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl);
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+ ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+void ipa_replenish_rx_cache(void);
+void ipa_cleanup_rx(void);
+int ipa_cfg_filter(u32 disable);
+void ipa_write_done(struct work_struct *work);
+void ipa_handle_rx(struct work_struct *work);
+void ipa_handle_rx_core(void);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+ u32 val = ioread32(base + offset);
+ IPADBG("0x%x(va) read reg 0x%x r_val 0x%x.\n",
+ (u32)base, offset, val);
+ return val;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+ IPADBG("0x%x(va) write reg 0x%x w_val 0x%x.\n",
+ (u32)base, offset, val);
+}
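+
+/*
+ * Usage sketch (illustrative, assumes ipa_ctx->mmio has been mapped):
+ * read the HW version register and extract the major revision:
+ *
+ *	u32 ver = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+ *	u32 major = (ver & IPA_COMP_HW_VERSION_MAJOR_BMSK) >>
+ *		IPA_COMP_HW_VERSION_MAJOR_SHFT;
+ */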
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+int ipa_bridge_setup(enum ipa_bridge_dir dir);
+int ipa_bridge_teardown(enum ipa_bridge_dir dir);
+
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_nat.c b/drivers/platform/msm/ipa/ipa_nat.c
new file mode 100644
index 0000000..c13c53a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_nat.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY 0
+#define IPA_NAT_SHARED_MEMORY 1
+
+static int ipa_nat_vma_fault_remap(
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ IPADBG("\n");
+ vmf->page = NULL;
+
+ return VM_FAULT_SIGBUS;
+}
+
+/* VMA-related file operations */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+ .fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+ struct ipa_nat_mem *nat_ctx;
+ IPADBG("\n");
+ nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+ filp->private_data = nat_ctx;
+ IPADBG("return\n");
+ return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+ unsigned long phys_addr;
+ int result;
+
+ mutex_lock(&nat_ctx->lock);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ if (nat_ctx->is_mapped) {
+ IPAERR("mapping already exists, only 1 supported\n");
+ result = -EINVAL;
+ goto bail;
+ }
+ IPADBG("map sz=0x%x\n", nat_ctx->size);
+ result =
+ dma_mmap_coherent(
+ NULL, vma,
+ nat_ctx->vaddr, nat_ctx->dma_handle,
+ nat_ctx->size);
+
+ if (result) {
+ IPAERR("unable to map memory. Err:%d\n", result);
+ goto bail;
+ }
+ } else {
+ IPADBG("Mapping shared(local) memory\n");
+ IPADBG("map sz=0x%lx\n", vsize);
+ phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+ IPA_SRAM_DIRECT_ACCESS_n_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+ if (remap_pfn_range(
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ IPAERR("remap failed\n");
+ result = -EAGAIN;
+ goto bail;
+ }
+
+ }
+ nat_ctx->is_mapped = true;
+ vma->vm_ops = &ipa_nat_remap_vm_ops;
+ IPADBG("return\n");
+ result = 0;
+bail:
+ mutex_unlock(&nat_ctx->lock);
+ return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+ .owner = THIS_MODULE,
+ .open = ipa_nat_open,
+ .mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem: [in/out] memory parameters
+ *
+ * Called by the NAT client driver to allocate memory for the NAT entries.
+ * Based on the request size, either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+ int gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result;
+
+ IPADBG("passed memory size %d\n", mem->size);
+
+ mutex_lock(&nat_ctx->lock);
+ if (mem->size <= 0 || !strlen(mem->dev_name)
+ || nat_ctx->is_dev_init == true) {
+		IPADBG("invalid parameters or device already initialized\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ nat_ctx->is_sys_mem = true;
+ nat_ctx->vaddr =
+ dma_alloc_coherent(NULL, mem->size, &nat_ctx->dma_handle,
+ gfp_flags);
+ if (nat_ctx->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ nat_ctx->size = mem->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ nat_ctx->is_sys_mem = false;
+ }
+
+ nat_ctx->class = class_create(THIS_MODULE, mem->dev_name);
+ if (IS_ERR(nat_ctx->class)) {
+ IPAERR("unable to create the class\n");
+ result = -ENODEV;
+ goto vaddr_alloc_fail;
+ }
+ result = alloc_chrdev_region(&nat_ctx->dev_num,
+ 0,
+ 1,
+ mem->dev_name);
+ if (result) {
+ IPAERR("alloc_chrdev_region err.\n");
+ result = -ENODEV;
+ goto alloc_chrdev_region_fail;
+ }
+
+ nat_ctx->dev =
+ device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+ mem->dev_name);
+
+ if (IS_ERR(nat_ctx->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ result = -ENODEV;
+ goto device_create_fail;
+ }
+
+ cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+ nat_ctx->cdev.owner = THIS_MODULE;
+ nat_ctx->cdev.ops = &ipa_nat_fops;
+
+ result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ if (result) {
+ IPAERR("cdev_add err=%d\n", -result);
+ goto cdev_add_fail;
+ }
+ nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT driver initialized successfully\n");
+ result = 0;
+ goto bail;
+
+cdev_add_fail:
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+ class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+ if (nat_ctx->vaddr) {
+ IPADBG("Releasing system memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->vaddr = NULL;
+ nat_ctx->dma_handle = 0;
+ nat_ctx->size = 0;
+ }
+bail:
+ mutex_unlock(&nat_ctx->lock);
+
+ return result;
+}
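+
+/*
+ * Usage sketch (illustrative; the device name and size are hypothetical):
+ * a NAT client fills an ipa_ioc_nat_alloc_mem request before calling in.
+ * A size above IPA_NAT_PHYS_MEM_SIZE selects system memory:
+ *
+ *	struct ipa_ioc_nat_alloc_mem mem;
+ *
+ *	strlcpy(mem.dev_name, "ipaNatTable", sizeof(mem.dev_name));
+ *	mem.size = 4096;
+ *	if (allocate_nat_device(&mem))
+ *		...handle failure...
+ */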
+
+/* IOCTL function handlers */
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ int result;
+
+ IPADBG("\n");
+ if (init->tbl_index < 0 || init->table_entries <= 0) {
+		IPADBG("invalid table index or number of entries\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ if (ipa_ctx->nat_mem.vaddr) {
+ IPADBG("using system memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+ IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+ cmd->ipv4_expansion_rules_addr =
+ ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+ IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+ cmd->index_table_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd->index_table_expansion_addr =
+ ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+ cmd->ipv4_rules_addr =
+ init->ipv4_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->ipv4_expansion_rules_addr =
+ init->expn_rules_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_addr = init->index_offset + IPA_RAM_NAT_OFST;
+
+ cmd->index_table_expansion_addr =
+ init->index_expn_offset + IPA_RAM_NAT_OFST;
+ }
+ cmd->table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd->table_index);
+ cmd->size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+ cmd->size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+ cmd->public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ IPADBG("posting v4 init command\n");
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_cmd;
+ }
+
+ IPADBG("return\n");
+ result = 0;
+free_cmd:
+ kfree(cmd);
+bail:
+ return result;
+}
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ struct ipa_nat_dma *cmd = NULL;
+ struct ipa_desc *desc = NULL;
+ u16 size = 0, cnt = 0;
+ int ret = 0;
+
+ IPADBG("\n");
+ if (dma->entries <= 0) {
+ IPADBG("Invalid number of commands\n");
+ ret = -EPERM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_desc) * dma->entries;
+ desc = kmalloc(size, GFP_KERNEL);
+ if (desc == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ size = sizeof(struct ipa_nat_dma) * dma->entries;
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ for (cnt = 0; cnt < dma->entries; cnt++) {
+ cmd[cnt].table_index = dma->dma[cnt].table_index;
+ cmd[cnt].base_addr = dma->dma[cnt].base_addr;
+ cmd[cnt].offset = dma->dma[cnt].offset;
+ cmd[cnt].data = dma->dma[cnt].data;
+ desc[cnt].type = IPA_IMM_CMD_DESC;
+ desc[cnt].opcode = IPA_NAT_DMA;
+ desc[cnt].callback = NULL;
+ desc[cnt].user1 = NULL;
+
+ desc[cnt].user2 = NULL;
+
+ desc[cnt].len = sizeof(struct ipa_nat_dma);
+ desc[cnt].pyld = (void *)&cmd[cnt];
+ }
+ IPADBG("posting dma command with entries %d\n", dma->entries);
+ ret = ipa_send_cmd(dma->entries, desc);
+ if (ret == -EPERM)
+ IPAERR("Fail to send immediate command\n");
+
+bail:
+ kfree(cmd);
+ kfree(desc);
+
+ return ret;
+}
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx: [in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+ IPADBG("\n");
+ mutex_lock(&nat_ctx->lock);
+
+ if (nat_ctx->is_sys_mem) {
+ IPADBG("freeing the dma memory\n");
+ dma_free_coherent(
+ NULL, nat_ctx->size,
+ nat_ctx->vaddr, nat_ctx->dma_handle);
+ nat_ctx->size = 0;
+ nat_ctx->vaddr = NULL;
+ }
+ nat_ctx->is_mapped = false;
+ nat_ctx->is_sys_mem = false;
+ cdev_del(&nat_ctx->cdev);
+ device_destroy(nat_ctx->class, nat_ctx->dev_num);
+ unregister_chrdev_region(nat_ctx->dev_num, 1);
+ class_destroy(nat_ctx->class);
+ nat_ctx->is_dev_init = false;
+
+ mutex_unlock(&nat_ctx->lock);
+ IPADBG("return\n");
+ return;
+}
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_ip_v4_nat_init *cmd;
+ u16 size = sizeof(struct ipa_ip_v4_nat_init);
+ u8 mem_type = IPA_NAT_SHARED_MEMORY;
+ u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+ int result;
+
+ IPADBG("\n");
+ if (del->table_index < 0 || del->public_ip_addr == 0) {
+ IPADBG("Bad Parameter\n");
+ result = -EPERM;
+ goto bail;
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (cmd == NULL) {
+ IPAERR("Failed to alloc immediate command object\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ cmd->table_index = del->table_index;
+ cmd->ipv4_rules_addr = base_addr;
+ cmd->ipv4_rules_addr_type = mem_type;
+ cmd->ipv4_expansion_rules_addr = base_addr;
+ cmd->ipv4_expansion_rules_addr_type = mem_type;
+ cmd->index_table_addr = base_addr;
+ cmd->index_table_addr_type = mem_type;
+ cmd->index_table_expansion_addr = base_addr;
+ cmd->index_table_expansion_addr_type = mem_type;
+ cmd->size_base_tables = 0;
+ cmd->size_expansion_tables = 0;
+ cmd->public_ip_addr = del->public_ip_addr;
+
+ desc.opcode = IPA_IP_V4_NAT_INIT;
+ desc.type = IPA_IMM_CMD_DESC;
+ desc.callback = NULL;
+ desc.user1 = NULL;
+ desc.user2 = NULL;
+ desc.pyld = (void *)cmd;
+ desc.len = size;
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("Fail to send immediate command\n");
+ result = -EPERM;
+ goto free_mem;
+ }
+
+ ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+ IPADBG("return\n");
+ result = 0;
+free_mem:
+ kfree(cmd);
+bail:
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
new file mode 100644
index 0000000..000718b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM (not all 8K is available
+ * for SW use); the first 2K are set aside for NAT
+ */
+
+#define IPA_RAM_NAT_OFST 0
+#define IPA_RAM_NAT_SIZE 2048
+#define IPA_RAM_HDR_OFST 2048
+#define IPA_RAM_HDR_SIZE 256
+#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
+#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_RT_OFST (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
+#define IPA_RAM_V4_RT_SIZE 1024
+#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
+#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_RT_OFST (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
+#define IPA_RAM_V6_RT_SIZE 1024
+#define IPA_RAM_END_OFST (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
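+
+/*
+ * Sanity sketch (derived from the offsets above): NAT 0..2047,
+ * HDR 2048..2303, v4 FLT 2304..3327, v4 RT 3328..4351, v6 FLT
+ * 4352..5375, v6 RT 5376..6399, so IPA_RAM_END_OFST evaluates to 6400
+ * and fits in the 8K of IPA RAM.  A compile-time guard could assert:
+ *
+ *	BUILD_BUG_ON(IPA_RAM_END_OFST > 8192);
+ */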
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
new file mode 100644
index 0000000..61913b6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+/*
+ * IPA's core specific registers
+ */
+
+#define IPA_REG_BASE_OFST 0x00020000
+
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+#define IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define IPA_COMP_HW_VERSION_MAJOR_BMSK 0xff000000
+#define IPA_COMP_HW_VERSION_MAJOR_SHFT 0x18
+#define IPA_COMP_HW_VERSION_MINOR_BMSK 0xff0000
+#define IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define IPA_COMP_HW_VERSION_STEP_SHFT 0x0
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_VERSION_RMSK 0xffffffff
+#define IPA_VERSION_IPA_R_REV_BMSK 0xff000000
+#define IPA_VERSION_IPA_R_REV_SHFT 0x18
+#define IPA_VERSION_IPA_Q_REV_BMSK 0xff0000
+#define IPA_VERSION_IPA_Q_REV_SHFT 0x10
+#define IPA_VERSION_IPA_P_REV_BMSK 0xff00
+#define IPA_VERSION_IPA_P_REV_SHFT 0x8
+#define IPA_VERSION_IPA_ECO_REV_BMSK 0xff
+#define IPA_VERSION_IPA_ECO_REV_SHFT 0x0
+
+#define IPA_COMP_CFG_OFST 0x00000038
+#define IPA_COMP_CFG_RMSK 0x1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0x0
+
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+#define IPA_COMP_SW_RESET_RMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_BMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_SHFT 0x0
+
+#define IPA_CLKON_CFG_OFST 0x00000040
+#define IPA_CLKON_CFG_RMSK 0xf
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_TX_BMSK 0x4
+#define IPA_CLKON_CFG_CGC_OPEN_TX_SHFT 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_SHFT 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+#define IPA_HEAD_OF_LINE_BLOCK_EN_RMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_BMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_OFST 0x00000048
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_RMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_BMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_SHFT 0x0
+
+#define IPA_ROUTE_OFST 0x0000004c
+#define IPA_ROUTE_RMSK 0x1ffff
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+
+#define IPA_FILTER_OFST 0x00000050
+#define IPA_FILTER_RMSK 0x1
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_MASTER_PRIORITY_OFST 0x00000054
+#define IPA_MASTER_PRIORITY_RMSK 0xffffffff
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_BMSK 0xc0000000
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_SHFT 0x1e
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_BMSK 0x30000000
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_SHFT 0x1c
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_BMSK 0xc000000
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_SHFT 0x1a
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_BMSK 0x3000000
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_SHFT 0x18
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_BMSK 0xc00000
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_SHFT 0x16
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_BMSK 0x300000
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_SHFT 0x14
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_BMSK 0xc0000
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_SHFT 0x12
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_BMSK 0x30000
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_SHFT 0x10
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_BMSK 0xc000
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_SHFT 0xe
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_BMSK 0x3000
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_SHFT 0xc
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_BMSK 0xc00
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_SHFT 0xa
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_BMSK 0x300
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_SHFT 0x8
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_BMSK 0xc0
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_SHFT 0x6
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_BMSK 0x30
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_SHFT 0x4
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_BMSK 0xc
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_SHFT 0x2
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_BMSK 0x3
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_SHFT 0x0
+
+#define IPA_SHARED_MEM_SIZE_OFST 0x00000058
+#define IPA_SHARED_MEM_SIZE_RMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+#define IPA_NAT_TIMER_OFST 0x0000005c
+#define IPA_NAT_TIMER_RMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_BMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_SHFT 0x0
+
+#define IPA_NAT_TIMER_RESET_OFST 0x00000060
+#define IPA_NAT_TIMER_RESET_RMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_BMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_SHFT 0x0
+
+#define IPA_ENDP_INIT_NAT_n_OFST(n) (0x00000080 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_MAXn 19
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_n_OFST(n) (0x000000e0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_n_RMSK 0x7ffffff
+#define IPA_ENDP_INIT_HDR_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_OFST(n) (0x00000140 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_n_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_n_MAXn 19
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x7c
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x2
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_OFST(n) (0x000001a0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_n_RMSK 0x7fff
+#define IPA_ENDP_INIT_AGGR_n_MAXn 19
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_ROUTE_n_OFST(n) (0x00000200 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_n_RMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_MAXn 19
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_1_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_SPARE_REG_2_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_SRAM_DIRECT_ACCESS_n_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_n_RMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_MAXn 2047
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_BMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_SHFT 0x0
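+
+/*
+ * Usage sketch (illustrative): the NAT mmap path in ipa_nat.c derives
+ * the physical address of an SRAM word from these offsets:
+ *
+ *	phys = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+ *		IPA_SRAM_DIRECT_ACCESS_n_OFST(n);
+ */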
+
+#endif /* __IPA_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
new file mode 100644
index 0000000..c69e1fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
+#define IPA_RT_TABLE_WORD_SIZE (4)
+#define IPA_RT_INDEX_BITMAP_SIZE (32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
+#define IPA_RT_BIT_MASK (0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
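+
+/*
+ * Alignment sketch (illustrative): the ALLIGNMENT constants are a
+ * power of two minus one, so rounding up is a mask operation.  For a
+ * 130-byte table:
+ *
+ *	sz = (130 + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ *		~IPA_RT_TABLE_MEMORY_ALLIGNMENT;	yields 256
+ */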
+
+/**
+ * ipa_generate_rt_hw_rule() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller wants to know the
+ *	size of the rule as seen by HW, so a scratch buffer is used
+ *	instead. With this scheme the rule is generated twice: once to
+ *	learn its size using the scratch buffer, and a second time to
+ *	write the rule into the caller-supplied buffer of the required
+ *	size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+ struct ipa_rt_entry *entry, u8 *buf)
+{
+ struct ipa_rt_rule_hw_hdr *rule_hdr;
+ const struct ipa_rt_rule *rule =
+ (const struct ipa_rt_rule *)&entry->rule;
+ u16 en_rule = 0;
+ u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+ u8 *start;
+ int pipe_idx;
+
+ memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+ if (buf == NULL)
+ buf = tmp;
+
+ start = buf;
+ rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+ pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+ entry->rule.dst);
+ if (pipe_idx == -1) {
+ IPAERR("Wrong destination pipe specified in RT rule\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+ rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+ if (entry->hdr)
+ rule_hdr->u.hdr.hdr_offset =
+ entry->hdr->offset_entry->offset >> 2;
+ else
+ rule_hdr->u.hdr.hdr_offset = 0;
+
+ buf += sizeof(struct ipa_rt_rule_hw_hdr);
+ if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+ IPAERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+
+ IPADBG("en_rule 0x%x\n", en_rule);
+
+ rule_hdr->u.hdr.en_rule = en_rule;
+ ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (entry->hw_len == 0) {
+ entry->hw_len = buf - start;
+ } else if (entry->hw_len != (buf - start)) {
+ IPAERR(
+ "hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+ entry->hw_len,
+ (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
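+
+/*
+ * Two-pass sketch (illustrative): callers size a rule with a NULL
+ * buffer first, which records entry->hw_len, then render it into real
+ * memory:
+ *
+ *	if (ipa_generate_rt_hw_rule(ip, entry, NULL))
+ *		return -EPERM;
+ *	...
+ *	if (ipa_generate_rt_hw_rule(ip, entry, body))
+ *		return -EPERM;
+ *	body += entry->hw_len;
+ */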
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ * @max_rt_idx: maximal index
+ *
+ * Returns: size of the routing table in bytes, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the size of the routing
+ * tbl hdr
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+ int *max_rt_idx)
+{
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ u32 total_sz = 0;
+ u32 tbl_sz;
+ u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+ int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+ int i;
+
+ *hdr_sz = 0;
+ set = &ipa_ctx->rt_tbl_set[ip];
+
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (bitmap & IPA_RT_BIT_MASK)
+ highest_bit_set = i;
+ bitmap >>= 1;
+ }
+
+ *max_rt_idx = highest_bit_set;
+ if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("no rt tbls present\n");
+ total_sz = IPA_RT_TABLE_WORD_SIZE;
+ *hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+ return total_sz;
+ }
+
+ *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+ total_sz += *hdr_sz;
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ tbl_sz = 0;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
+ IPAERR("failed to find HW RT rule size\n");
+ return -EPERM;
+ }
+ tbl_sz += entry->hw_len;
+ }
+
+ if (tbl_sz)
+ tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+ if (tbl->in_sys)
+ continue;
+
+ if (tbl_sz) {
+ /* add the terminator */
+ total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+ /* every rule-set should start at word boundary */
+ total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ }
+ }
+
+ IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+ return total_sz;
+}
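+
+/*
+ * Sizing illustration: if tables exist at indices 0 and 4, the highest
+ * bit set is 4 and the header spans (4 + 1) * IPA_RT_TABLE_WORD_SIZE =
+ * 20 bytes; the unused slots in that span are later pointed at the
+ * empty system routing table by ipa_generate_rt_hw_tbl().
+ */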
+
+/**
+ * ipa_generate_rt_hw_tbl() - generates the routing hardware table
+ * @ip: [in] the ip address family type
+ * @mem: [out] buffer to put the filtering table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_rt_tbl_set *set;
+ u32 hdr_sz;
+ u32 offset;
+ u8 *hdr;
+ u8 *body;
+ u8 *base;
+ struct ipa_mem_buffer rt_tbl_mem;
+ u8 *rt_tbl_mem_body;
+ int max_rt_idx;
+ int i;
+
+ mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+ mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+ ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+ if (mem->size == 0) {
+ IPAERR("rt tbl empty ip=%d\n", ip);
+ goto error;
+ }
+ mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+ GFP_KERNEL);
+ if (!mem->base) {
+ IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+ goto error;
+ }
+
+ memset(mem->base, 0, mem->size);
+
+ /* build the rt tbl in the DMA buffer to submit to IPA HW */
+ base = hdr = (u8 *)mem->base;
+ body = base + hdr_sz;
+
+ /* setup all indices to point to the empty sys rt tbl */
+ for (i = 0; i <= max_rt_idx; i++)
+ ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+ hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ offset = body - base;
+ if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+ IPAERR("offset is not word multiple %d\n", offset);
+ goto proc_err;
+ }
+
+ if (!tbl->in_sys) {
+ /* convert offset to words from bytes */
+ offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+ /* rule is at an offset from base */
+ offset |= IPA_RT_BIT_MASK;
+
+ /* update the hdr at the right index */
+ ipa_write_32(offset, hdr +
+ (tbl->idx * IPA_RT_TABLE_WORD_SIZE));
+
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry, body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ goto proc_err;
+ }
+ body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ body = ipa_write_32(0, body);
+ if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+ /* advance body to next word boundary */
+ body = body + (IPA_RT_TABLE_WORD_SIZE -
+ ((u32)body &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+ } else {
+ WARN_ON(tbl->sz == 0);
+ /* allocate memory for the RT tbl */
+ rt_tbl_mem.size = tbl->sz;
+ rt_tbl_mem.base =
+ dma_alloc_coherent(NULL, rt_tbl_mem.size,
+ &rt_tbl_mem.phys_base, GFP_KERNEL);
+ if (!rt_tbl_mem.base) {
+ IPAERR("fail to alloc DMA buff of size %d\n",
+ rt_tbl_mem.size);
+ WARN_ON(1);
+ goto proc_err;
+ }
+
+ WARN_ON(rt_tbl_mem.phys_base &
+ IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+ rt_tbl_mem_body = rt_tbl_mem.base;
+ memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+ /* update the hdr at the right index */
+ ipa_write_32(rt_tbl_mem.phys_base,
+ hdr + (tbl->idx *
+ IPA_RT_TABLE_WORD_SIZE));
+ /* generate the rule-set */
+ list_for_each_entry(entry, &tbl->head_rt_rule_list,
+ link) {
+ if (ipa_generate_rt_hw_rule(ip, entry,
+ rt_tbl_mem_body)) {
+ IPAERR("failed to gen HW RT rule\n");
+ WARN_ON(1);
+ goto rt_table_mem_alloc_failed;
+ }
+ rt_tbl_mem_body += entry->hw_len;
+ }
+
+ /* write the rule-set terminator */
+ rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+ if (tbl->curr_mem.phys_base) {
+ WARN_ON(tbl->prev_mem.phys_base);
+ tbl->prev_mem = tbl->curr_mem;
+ }
+ tbl->curr_mem = rt_tbl_mem;
+ }
+ }
+
+ return 0;
+
+rt_table_mem_alloc_failed:
+ dma_free_coherent(NULL, rt_tbl_mem.size,
+ rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+ return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *next;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (tbl->prev_mem.phys_base) {
+ IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+ dma_free_coherent(NULL, tbl->prev_mem.size,
+ tbl->prev_mem.base,
+ tbl->prev_mem.phys_base);
+ memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+ }
+ }
+
+ set = &ipa_ctx->reap_rt_tbl_set[ip];
+ list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+ list_del(&tbl->link);
+ WARN_ON(tbl->prev_mem.phys_base != 0);
+ if (tbl->curr_mem.phys_base) {
+ IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+ ip);
+ dma_free_coherent(NULL, tbl->curr_mem.size,
+ tbl->curr_mem.base,
+ tbl->curr_mem.phys_base);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ }
+ }
+}
+
+static int __ipa_commit_rt(enum ipa_ip_type ip)
+{
+ struct ipa_desc desc = { 0 };
+ struct ipa_mem_buffer *mem;
+ void *cmd;
+ struct ipa_ip_v4_routing_init *v4;
+ struct ipa_ip_v6_routing_init *v6;
+ u16 avail;
+ u16 size;
+
+ mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+ if (!mem) {
+ IPAERR("failed to alloc memory object\n");
+ goto fail_alloc_mem;
+ }
+
+ if (ip == IPA_IP_v4) {
+ avail = IPA_RAM_V4_RT_SIZE;
+ size = sizeof(struct ipa_ip_v4_routing_init);
+ } else {
+ avail = IPA_RAM_V6_RT_SIZE;
+ size = sizeof(struct ipa_ip_v6_routing_init);
+ }
+ cmd = kmalloc(size, GFP_KERNEL);
+ if (!cmd) {
+ IPAERR("failed to alloc immediate command object\n");
+ goto fail_alloc_cmd;
+ }
+
+ if (ipa_generate_rt_hw_tbl(ip, mem)) {
+ IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (mem->size > avail) {
+ IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+ goto fail_hw_tbl_gen;
+ }
+
+ if (ip == IPA_IP_v4) {
+ v4 = (struct ipa_ip_v4_routing_init *)cmd;
+ desc.opcode = IPA_IP_V4_ROUTING_INIT;
+ v4->ipv4_rules_addr = mem->phys_base;
+ v4->size_ipv4_rules = mem->size;
+ v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
+ } else {
+ v6 = (struct ipa_ip_v6_routing_init *)cmd;
+ desc.opcode = IPA_IP_V6_ROUTING_INIT;
+ v6->ipv6_rules_addr = mem->phys_base;
+ v6->size_ipv6_rules = mem->size;
+ v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
+ }
+
+ desc.pyld = cmd;
+ desc.len = size;
+ desc.type = IPA_IMM_CMD_DESC;
+ IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+ if (ipa_send_cmd(1, &desc)) {
+ IPAERR("fail to send immediate command\n");
+ goto fail_send_cmd;
+ }
+
+ __ipa_reap_sys_rt_tbls(ip);
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+ kfree(cmd);
+ kfree(mem);
+
+ return 0;
+
+fail_send_cmd:
+ if (mem->phys_base)
+ dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+ kfree(cmd);
+fail_alloc_cmd:
+ kfree(mem);
+fail_alloc_mem:
+ return -EPERM;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ *		with the given name
+ * @ip: [in] the ip address family type of the wanted routing table
+ * @name: [in] the name of the wanted routing table
+ *
+ * Returns: the routing table with the given name, or NULL if it
+ * doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+ if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+ const char *name)
+{
+ struct ipa_rt_tbl *entry;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_tree_node *node;
+ int i;
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto node_alloc_fail;
+ }
+
+ if (ip >= IPA_IP_MAX || name == NULL) {
+ IPAERR("bad parm\n");
+ goto error;
+ }
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ /* check if this table exists */
+ entry = __ipa_find_rt_tbl(ip, name);
+ if (!entry) {
+ entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT tbl object\n");
+ goto error;
+ }
+ /* find a routing tbl index */
+ for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+ if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+ entry->idx = i;
+ set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+ break;
+ }
+ }
+ if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+ goto fail_rt_idx_alloc;
+ }
+
+ INIT_LIST_HEAD(&entry->head_rt_rule_list);
+ INIT_LIST_HEAD(&entry->link);
+ strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+ entry->set = set;
+ entry->cookie = IPA_COOKIE;
+ entry->in_sys = (ip == IPA_IP_v4) ?
+ !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+ set->tbl_cnt++;
+ list_add(&entry->link, &set->head_rt_tbl_list);
+
+ IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+ set->tbl_cnt, ip);
+
+ node->hdl = (u32)entry;
+ if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ }
+ }
+
+ return entry;
+
+fail_rt_idx_alloc:
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+node_alloc_fail:
+ return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ if (!entry->in_sys) {
+ list_del(&entry->link);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+ } else {
+ list_move(&entry->link,
+ &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+ clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+ entry->set->tbl_cnt--;
+ IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+ entry->set->tbl_cnt);
+ }
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_entry *entry;
+ struct ipa_tree_node *node;
+
+ if (rule->hdr_hdl &&
+ ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
+ ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_COOKIE)) {
+ IPAERR("rt rule does not point to valid hdr\n");
+ goto error;
+ }
+
+ node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ if (!node) {
+ IPAERR("failed to alloc tree node object\n");
+ goto error;
+ }
+
+ tbl = __ipa_add_rt_tbl(ip, name);
+ if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ goto fail_rt_tbl_sanity;
+ }
+ /*
+ * do not allow any rules to be added at end of the "default" routing
+ * tables
+ */
+ if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
+ (tbl->rule_cnt > 0) && (at_rear != 0)) {
+ IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+ tbl->rule_cnt, at_rear);
+ goto fail_rt_tbl_sanity;
+ }
+
+ entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+ if (!entry) {
+ IPAERR("failed to alloc RT rule object\n");
+ goto fail_rt_tbl_sanity;
+ }
+ INIT_LIST_HEAD(&entry->link);
+ entry->cookie = IPA_COOKIE;
+ entry->rule = *rule;
+ entry->tbl = tbl;
+ entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
+ if (at_rear)
+ list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+ else
+ list_add(&entry->link, &tbl->head_rt_rule_list);
+ tbl->rule_cnt++;
+ if (entry->hdr)
+ entry->hdr->ref_cnt++;
+ IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+ *rule_hdl = (u32)entry;
+
+ node->hdl = *rule_hdl;
+ if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
+ IPAERR("failed to add to tree\n");
+ WARN_ON(1);
+ goto ipa_insert_failed;
+ }
+
+ return 0;
+
+ipa_insert_failed:
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+fail_rt_tbl_sanity:
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+ return -EPERM;
+}
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (__ipa_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
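+
+/*
+ * Example usage (illustrative sketch only; the table name, the
+ * ipa_rt_rule_add entry type and the rule contents are assumptions,
+ * not taken from this file):
+ *
+ *    struct ipa_ioc_add_rt_rule *req;
+ *    u32 saved_hdl = 0;
+ *
+ *    req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_add),
+ *                  GFP_KERNEL);
+ *    if (!req)
+ *            return -ENOMEM;
+ *    req->commit = 1;
+ *    req->ip = IPA_IP_v4;
+ *    req->num_rules = 1;
+ *    strlcpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *    req->rules[0].at_rear = 1;
+ *    if (!ipa_add_rt_rule(req) && req->rules[0].status == 0)
+ *            saved_hdl = req->rules[0].rt_rule_hdl;
+ *    kfree(req);
+ */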
+
+static int __ipa_del_rt_rule(u32 rule_hdl)
+{
+ struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
+ struct ipa_tree_node *node;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+ IPAERR("bad params\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+ entry->tbl->rule_cnt);
+ if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry->tbl))
+ IPAERR("fail to del RT tbl\n");
+ }
+ entry->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+ return 0;
+}
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules from SW and optionally
+ * commit to IPA HW
+ * @hdls: [inout] set of routing rules to delete
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ int i;
+ int ret;
+
+ if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa_ctx->lock);
+ for (i = 0; i < hdls->num_hdls; i++) {
+ if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+ IPAERR("failed to del rt rule %i\n", i);
+ hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+ } else {
+ hdls->hdl[i].status = 0;
+ }
+ }
+
+ if (hdls->commit)
+ if (__ipa_commit_rt(hdls->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
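+
+/*
+ * Example usage (illustrative sketch; saved_hdl is assumed to come from
+ * an earlier ipa_add_rt_rule() call and the ipa_rt_rule_del entry type
+ * is an assumption):
+ *
+ *    struct ipa_ioc_del_rt_rule *req;
+ *
+ *    req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_del),
+ *                  GFP_KERNEL);
+ *    if (!req)
+ *            return -ENOMEM;
+ *    req->commit = 1;
+ *    req->ip = IPA_IP_v4;
+ *    req->num_hdls = 1;
+ *    req->hdl[0].hdl = saved_hdl;
+ *    if (ipa_del_rt_rule(req) || req->hdl[0].status)
+ *            ... handle the error ...
+ *    kfree(req);
+ */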
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ int ret;
+ /*
+ * issue a commit on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_commit_flt(ip))
+ return -EPERM;
+
+ mutex_lock(&ipa_ctx->lock);
+ if (__ipa_commit_rt(ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa_ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip: The family of routing tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ struct ipa_rt_tbl *tbl;
+ struct ipa_rt_tbl *tbl_next;
+ struct ipa_rt_tbl_set *set;
+ struct ipa_rt_entry *rule;
+ struct ipa_rt_entry *rule_next;
+ struct ipa_tree_node *node;
+ struct ipa_rt_tbl_set *rset;
+
+ /*
+ * issue a reset on the filtering module of same IP type since
+ * filtering rules point to routing tables
+ */
+ if (ipa_reset_flt(ip))
+ IPAERR("fail to reset flt ip=%d\n", ip);
+
+ set = &ipa_ctx->rt_tbl_set[ip];
+ rset = &ipa_ctx->reap_rt_tbl_set[ip];
+ mutex_lock(&ipa_ctx->lock);
+ IPADBG("reset rt ip=%d\n", ip);
+ list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ list_for_each_entry_safe(rule, rule_next,
+ &tbl->head_rt_rule_list, link) {
+ node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
+ (u32)rule);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /*
+ * for the "default" routing tbl, remove all but the
+ * last rule
+ */
+ if (tbl->idx == 0 && tbl->rule_cnt == 1)
+ continue;
+
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ rule->hdr->ref_cnt--;
+ rule->cookie = 0;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
+ if (node == NULL)
+ WARN_ON(1);
+
+ /* do not remove the "default" routing tbl which has index 0 */
+ if (tbl->idx != 0) {
+ if (!tbl->in_sys) {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+ } else {
+ list_move(&tbl->link, &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ }
+ /* remove the handle from the database */
+ rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+ kmem_cache_free(ipa_ctx->tree_node_cache, node);
+ }
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - look up the specified routing table and return its handle
+ * if it exists; if the lookup succeeds the routing table ref cnt is increased
+ * @lookup: [inout] routing table to lookup and its handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ * Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ struct ipa_rt_tbl *entry;
+ int result = -EFAULT;
+
+ if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+ mutex_lock(&ipa_ctx->lock);
+ entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
+ if (entry && entry->cookie == IPA_COOKIE) {
+ entry->ref_cnt++;
+ lookup->hdl = (uint32_t)entry;
+
+ /* commit for get */
+ if (__ipa_commit_rt(lookup->ip))
+ IPAERR("fail to commit RT tbl\n");
+
+ result = 0;
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
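+
+/*
+ * Example usage (illustrative sketch; the table name is an assumption):
+ *
+ *    struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *    memset(&lookup, 0, sizeof(lookup));
+ *    lookup.ip = IPA_IP_v4;
+ *    strlcpy(lookup.name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *    if (!ipa_get_rt_tbl(&lookup)) {
+ *            ... use lookup.hdl while holding the reference ...
+ *            ipa_put_rt_tbl(lookup.hdl);    releases the ref taken above
+ *    }
+ */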
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl: [in] the routing table handle to release
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
+ struct ipa_tree_node *node;
+ enum ipa_ip_type ip = IPA_IP_MAX;
+
+ if (entry == NULL || (entry->cookie != IPA_COOKIE) ||
+ entry->ref_cnt == 0) {
+ IPAERR("bad parms\n");
+ return -EINVAL;
+ }
+ node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
+ if (node == NULL) {
+ IPAERR("lookup failed\n");
+ return -EPERM;
+ }
+
+ if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+ ip = IPA_IP_v4;
+ else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+ ip = IPA_IP_v6;
+ else
+ WARN_ON(1);
+
+ mutex_lock(&ipa_ctx->lock);
+ entry->ref_cnt--;
+ if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ if (__ipa_del_rt_tbl(entry))
+ IPAERR("fail to del RT tbl\n");
+ /* commit for put */
+ if (__ipa_commit_rt(ip))
+ IPAERR("fail to commit RT tbl\n");
+ }
+ mutex_unlock(&ipa_ctx->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
new file mode 100644
index 0000000..d5d5566
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h> /* gen_pool_alloc() */
+#include <linux/io.h>
+#include "ipa_i.h"
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1, -1 };
+
+static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
+ { -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+ { -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+ { 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+ { 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+};
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST,
+ IPA_SETFIELD(route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK) |
+ IPA_SETFIELD(route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
+ IPA_SETFIELD(route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+
+ return 0;
+}
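+
+/*
+ * Example usage (illustrative sketch; the default pipe index and header
+ * offset below are assumptions):
+ *
+ *    struct ipa_route route = {
+ *            .route_dis = 0,
+ *            .route_def_pipe = 5,
+ *            .route_def_hdr_table = 0,
+ *            .route_def_hdr_ofst = 0,
+ *    };
+ *
+ *    ipa_cfg_route(&route);
+ */
+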
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+ ipa_write_reg(ipa_ctx->mmio, IPA_FILTER_OFST,
+ IPA_SETFIELD(!disable,
+ IPA_FILTER_FILTER_EN_SHFT,
+ IPA_FILTER_FILTER_EN_BMSK));
+ return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+ u32 ipa_version = 0;
+
+ /* do soft reset of IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+ /* enable IPA */
+ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+ /* Read IPA version and make sure we have access to the registers */
+ ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+ if (ipa_version == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @mode: IPA operating mode
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+ enum ipa_client_type client)
+{
+ return ep_mapping[mode][client];
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the written value
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the written value
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+ *dest++ = (u8)((hw) & 0xFF);
+ *dest++ = (u8)((hw >> 8) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte following the written value
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+ *dest++ = (b) & 0xFF;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer advanced to the next 32-bit boundary
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+ int i = (u32)dest & 0x3;
+ int j;
+
+ if (i)
+ for (j = 0; j < (4 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
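+
+/*
+ * Example (illustrative sketch): the writers above store values little
+ * endian, advance the destination pointer and return it, so serializing
+ * a HW table entry is a chain of calls; ipa_pad_to_32() then zero-fills
+ * up to the next 32-bit boundary:
+ *
+ *    u8 buf[16];
+ *    u8 *cur = buf;
+ *
+ *    cur = ipa_write_8(0x12, cur);           1 byte
+ *    cur = ipa_write_16(0x3456, cur);        2 bytes
+ *    cur = ipa_pad_to_32(cur);               cursor now 32-bit aligned
+ *    cur = ipa_write_32(0xdeadbeef, cur);    4 bytes
+ */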
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: [out] bitmap of the rule equations that were enabled
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+
+ if (ip == IPA_IP_v4) {
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+ IPA_FLT_FLOW_LABEL) {
+ IPAERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_TOS_EQ;
+ *buf = ipa_write_8(attrib->u.v4.tos, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 12 => offset of src ip in v4 header */
+ *buf = ipa_write_8(12, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ /* 16 => offset of dst ip in v4 header */
+ *buf = ipa_write_8(16, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+ *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v4 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v4 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v4 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v4 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_IPV4_IS_FRAG;
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else if (ip == IPA_IP_v6) {
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ /* error check */
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL ||
+ attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ IPAERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_PROTOCOL_EQ;
+ *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of type after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->type, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 1 => offset of code after v6 header */
+ *buf = ipa_write_8(1, *buf);
+ *buf = ipa_write_32(0xFF, *buf);
+ *buf = ipa_write_32(attrib->code, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+ /* 0 => offset of SPI after v6 header FIXME */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_32(0xFFFFFFFF, *buf);
+ *buf = ipa_write_32(attrib->spi, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_write_16(attrib->src_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_write_16(attrib->dst_port, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 0 => offset of src port after v6 header */
+ *buf = ipa_write_8(0, *buf);
+ *buf = ipa_write_16(attrib->src_port_hi, *buf);
+ *buf = ipa_write_16(attrib->src_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+ IPAERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+ /* 2 => offset of dst port after v6 header */
+ *buf = ipa_write_8(2, *buf);
+ *buf = ipa_write_16(attrib->dst_port_hi, *buf);
+ *buf = ipa_write_16(attrib->dst_port_lo, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 8 => offset of src ip in v6 header */
+ *buf = ipa_write_8(8, *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (ipa_ofst_meq128[ofst_meq128] == -1) {
+ IPAERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq128[ofst_meq128];
+ /* 24 => offset of dst ip in v6 header */
+ *buf = ipa_write_8(24, *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+ *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+ *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_FLT_TC;
+ *buf = ipa_write_8(attrib->u.v6.tc, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_FLT_FLOW_LABEL;
+ /* FIXME FL is only 20 bits */
+ *buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_METADATA_COMPARE;
+ *buf = ipa_write_8(0, *buf); /* offset, reserved */
+ *buf = ipa_write_32(attrib->meta_data_mask, *buf);
+ *buf = ipa_write_32(attrib->meta_data, *buf);
+ *buf = ipa_pad_to_32(*buf);
+ }
+
+ } else {
+ IPAERR("unsupported ip %d\n", ip);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ if (ipa_ofst_meq32[ofst_meq32] == -1) {
+ IPAERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= ipa_ofst_meq32[ofst_meq32];
+ *buf = ipa_write_8(0, *buf); /* offset */
+ *buf = ipa_write_32(0, *buf); /* mask */
+ *buf = ipa_write_32(0, *buf); /* val */
+ *buf = ipa_pad_to_32(*buf);
+ ofst_meq32++;
+ }
+
+ return 0;
+}
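+
+/*
+ * Example usage (illustrative sketch; the attribute values and buffer
+ * size are assumptions):
+ *
+ *    struct ipa_rule_attrib attrib;
+ *    u8 buf[128];
+ *    u8 *cur = buf;
+ *    u16 en_rule = 0;
+ *
+ *    memset(&attrib, 0, sizeof(attrib));
+ *    attrib.attrib_mask = IPA_FLT_PROTOCOL | IPA_FLT_SRC_ADDR;
+ *    attrib.u.v4.protocol = 17;                 UDP
+ *    attrib.u.v4.src_addr = 0xc0a80001;         192.168.0.1
+ *    attrib.u.v4.src_addr_mask = 0xffffffff;
+ *    if (!ipa_generate_hw_rule(IPA_IP_v4, &attrib, &cur, &en_rule))
+ *            en_rule now holds the equation-enable bits and
+ *            buf..cur holds the serialized equation operands
+ */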
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+ result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
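+
+/*
+ * Example usage (illustrative sketch; clnt_hdl is assumed to come from a
+ * prior connect, and the IPA_BASIC/IPA_BYPASS_NAT enum values are assumed
+ * from the public ipa.h rather than taken from this file):
+ *
+ *    struct ipa_ep_cfg cfg;
+ *
+ *    memset(&cfg, 0, sizeof(cfg));
+ *    cfg.hdr.hdr_len = 14;               e.g. an Ethernet II header
+ *    cfg.mode.mode = IPA_BASIC;
+ *    cfg.nat.nat_en = IPA_BYPASS_NAT;
+ *    if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *            ... handle the error ...
+ */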
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+ /* clnt_hdl is used as pipe_index */
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() - IPA end-point header configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ u32 val;
+ struct ipa_ep_context *ep;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+
+ /* copy over EP cfg */
+ ep->cfg.hdr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
+ IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
+ ipa_ctx->ep[clnt_hdl].dst_pipe_index = ipa_get_ep_mapping(ipa_ctx->mode,
+ ipa_ep_cfg->dst);
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
+ IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ u32 val;
+
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+ /* copy over EP cfg */
+ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;
+
+ val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_n_OFST(clnt_hdl), val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+ ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+ IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * if DMA mode was configured previously for this EP, return with
+ * success
+ */
+ if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+ IPADBG("DMA mode for EP %d\n", clnt_hdl);
+ return 0;
+ }
+
+ if (ipa_ep_cfg->rt_tbl_hdl)
+ IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+ /* always use the "default" routing tables whose indices are 0 */
+ ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+ ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_n_OFST(clnt_hdl),
+ IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+ int i;
+ u32 *cur = (u32 *)base;
+ u8 *byt;
+ IPADBG("START phys=%x\n", phy_base);
+ for (i = 0; i < size / 4; i++) {
+ byt = (u8 *)(cur + i);
+ IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
+ byt[0], byt[1], byt[2], byt[3]);
+ }
+ IPADBG("END\n");
+}
+
+/**
+ * ipa_dump() - dumps part of driver data structures for debug purposes
+ */
+void ipa_dump(void)
+{
+ struct ipa_mem_buffer hdr_mem = { 0 };
+ struct ipa_mem_buffer rt_mem = { 0 };
+ struct ipa_mem_buffer flt_mem = { 0 };
+
+ mutex_lock(&ipa_ctx->lock);
+
+ if (ipa_generate_hdr_hw_tbl(&hdr_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
+ IPAERR("fail\n");
+ if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
+ IPAERR("fail\n");
+ IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base, rt_mem.phys_base,
+ flt_mem.phys_base);
+ IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
+ (u32)rt_mem.base, (u32)flt_mem.base);
+ IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
+ flt_mem.size);
+ IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+ IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
+ IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);
+ if (hdr_mem.phys_base)
+ dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
+ hdr_mem.phys_base);
+ if (rt_mem.phys_base)
+ dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
+ rt_mem.phys_base);
+ if (flt_mem.phys_base)
+ dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
+ flt_mem.phys_base);
+ mutex_unlock(&ipa_ctx->lock);
+}
+
+/*
+ * TODO: add swap if needed, for now assume LE is ok for device memory
+ * even though IPA registers are assumed to be BE
+ */
+/**
+ * ipa_write_dev_8() - writes 8 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram)
+{
+ iowrite8(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_16() - writes 16 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ *
+ */
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram)
+{
+ iowrite16(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_32() - writes 32 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram)
+{
+ iowrite32(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_8() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram)
+{
+ return ioread8((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_16() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram)
+{
+ return ioread16((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_32() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram)
+{
+ return ioread32((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_8rep() - writes 8 bit values from a buffer
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf, unsigned long count)
+{
+ iowrite8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_write_dev_16rep() - writes 16 bit values from a buffer
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count)
+{
+ iowrite16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ buf, count);
+}
+
+/**
+ * ipa_write_dev_32rep() - writes 32 bit values from a buffer
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+ unsigned long count)
+{
+ iowrite32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ buf, count);
+}
+
+/**
+ * ipa_read_dev_8rep() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_read_dev_16rep() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_read_dev_32rep() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+ ioread32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+ count);
+}
+
+/**
+ * ipa_memset_dev() - memset IO
+ * @ofst_ipa_sram: address to set
+ * @value: value
+ * @count: number of bytes to set
+ */
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count)
+{
+ memset_io((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), value,
+ count);
+}
+
+/**
+ * ipa_memcpy_from_dev() - copy memory from device
+ * @dest: buffer to copy to
+ * @ofst_ipa_sram: address
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count)
+{
+ memcpy_fromio(dest, (void *)((u32)ipa_ctx->mmio + 0x4000 +
+ ofst_ipa_sram), count);
+}
+
+/**
+ * ipa_memcpy_to_dev() - copy memory to device
+ * @ofst_ipa_sram: address
+ * @source: buffer to copy from
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count)
+{
+ memcpy_toio((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+ source, count);
+}
+
+/**
+ * ipa_defrag() - handle IP de-fragmentation in bridging use cases
+ * @skb: skb
+ *
+ * Return value:
+ * 0: success
+ * -EINPROGRESS: skb was consumed, reassembly still in progress
+ */
+int ipa_defrag(struct sk_buff *skb)
+{
+ /*
+ * Reassemble IP fragments. TODO: need to setup network_header to
+ * point to start of IP header
+ */
+ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+ if (ip_defrag(skb, IP_DEFRAG_CONNTRACK_IN))
+ return -EINPROGRESS;
+ }
+
+ /* skb is fully assembled (or was never fragmented), pass it on */
+ return 0;
+}
+
+/**
+ * ipa_search() - search for handle in RB tree
+ * @root: tree root
+ * @hdl: handle
+ *
+ * Return value: tree node corresponding to the handle
+ */
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct ipa_tree_node *data = container_of(node,
+ struct ipa_tree_node, node);
+
+ if (hdl < data->hdl)
+ node = node->rb_left;
+ else if (hdl > data->hdl)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+/**
+ * ipa_insert() - insert new node to RB tree
+ * @root: tree root
+ * @data: new data to insert
+ *
+ * Return value:
+ * 0: success
+ * -EPERM: tree already contains the node with provided handle
+ */
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct ipa_tree_node *this = container_of(*new,
+ struct ipa_tree_node, node);
+
+ parent = *new;
+ if (data->hdl < this->hdl)
+ new = &((*new)->rb_left);
+ else if (data->hdl > this->hdl)
+ new = &((*new)->rb_right);
+ else
+ return -EPERM;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return 0;
+}
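+
+/*
+ * Example (illustrative sketch): the handle trees in this driver pair
+ * ipa_insert()/ipa_search() as follows (some_root and some_handle are
+ * placeholders):
+ *
+ *    struct ipa_tree_node *node;
+ *
+ *    node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+ *    if (!node)
+ *            return -ENOMEM;
+ *    node->hdl = some_handle;
+ *    if (ipa_insert(&some_root, node))
+ *            ... duplicate handle ...
+ *
+ *    if (ipa_search(&some_root, some_handle) == NULL)
+ *            ... unknown handle ...
+ */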
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+ int res;
+ u32 aligned_start_ofst;
+ u32 aligned_size;
+ struct gen_pool *pool;
+
+ if (!size) {
+ IPAERR("no IPA pipe mem alloted\n");
+ goto fail;
+ }
+
+ aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+ aligned_size = size - (aligned_start_ofst - start_ofst);
+
+ IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+ start_ofst, aligned_start_ofst, size, aligned_size);
+
+ /* allocation order of 8 i.e. 128 bytes, global pool */
+ pool = gen_pool_create(8, -1);
+ if (!pool) {
+ IPAERR("Failed to create a new memory pool.\n");
+ goto fail;
+ }
+
+ res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+ if (res) {
+ IPAERR("Failed to add memory to IPA pipe pool\n");
+ goto err_pool_add;
+ }
+
+ ipa_ctx->pipe_mem_pool = pool;
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(pool);
+fail:
+ return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -1: failure
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+ u32 vaddr;
+ int res = -1;
+
+ if (!ipa_ctx->pipe_mem_pool || !size) {
+ IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+ ipa_ctx->pipe_mem_pool);
+ return res;
+ }
+
+ vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+ if (vaddr) {
+ *ofst = vaddr;
+ res = 0;
+ IPADBG("size=%u ofst=%u\n", size, vaddr);
+ } else {
+ IPAERR("size=%u failed\n", size);
+ }
+
+ return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+ IPADBG("size=%u ofst=%u\n", size, ofst);
+ if (ipa_ctx->pipe_mem_pool && size)
+ gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+ return 0;
+}
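+
+/*
+ * Example (illustrative sketch; the offsets and sizes are assumptions):
+ *
+ *    u32 ofst;
+ *
+ *    ipa_pipe_mem_init(0x0, 0x2000);        once, e.g. at probe time
+ *    if (!ipa_pipe_mem_alloc(&ofst, 0x400))
+ *            ... use [ofst, ofst + 0x400) of pipe memory ...
+ *    ipa_pipe_mem_free(ofst, 0x400);
+ */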
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode: [in] the desired aggregation mode, e.g. straight MBIM or QCNCM
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST,
+ ((mode & IPA_AGGREGATION_MODE_MSK) <<
+ IPA_AGGREGATION_MODE_SHFT) |
+ (reg_val & IPA_AGGREGATION_MODE_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ u32 reg_val;
+
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+ return -EINVAL;
+ }
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
+ IPA_AGGREGATION_QCNCM_SIG0_SHFT |
+ (sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
+ sig[2] | (reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ u32 reg_val;
+ reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+ ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
+ IPA_AGGREGATION_SINGLE_NDP_MSK) |
+ (reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+ u32 next_start;
+ u32 prev_end;
+
+ IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+ next_start = (start + (boundary - 1)) & ~(boundary - 1);
+ prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+ while (next_start < prev_end)
+ next_start += boundary;
+
+ if (next_start == prev_end)
+ return 1;
+ else
+ return 0;
+}
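+
+/*
+ * Worked example: for start=100, end=300, boundary=256, next_start rounds
+ * 100 up to 256 and prev_end rounds 300 up to 512 and steps back one
+ * boundary to 256; the two meet, so [100, 300] straddles the boundary and
+ * 1 is returned. For start=100, end=200 the rounded values are 256 and 0,
+ * which never meet, so 0 is returned.
+ */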
+
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
new file mode 100644
index 0000000..3c7f5ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "a2_service.h"
+#include "ipa_i.h"
+
+static struct rmnet_bridge_cb_type {
+ u32 producer_handle;
+ u32 consumer_handle;
+ bool is_connected;
+} rmnet_bridge_cb;
+
+/**
+* rmnet_bridge_init() - Initialize RmNet bridge module
+*
+* Return codes:
+* 0: success
+*/
+int rmnet_bridge_init(void)
+{
+ memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
+
+ return 0;
+}
+EXPORT_SYMBOL(rmnet_bridge_init);
+
+/**
+* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_disconnect(void)
+{
+ int ret = 0;
+ if (false == rmnet_bridge_cb.is_connected) {
+ pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.is_connected = false;
+
+ ret = ipa_bridge_teardown(IPA_DL);
+ ret = ipa_bridge_teardown(IPA_UL);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_disconnect);
+
+/**
+* rmnet_bridge_connect() - Connect RmNet bridge module
+* @producer_hdl: IPA producer handle
+* @consumer_hdl: IPA consumer handle
+* @wwan_logical_channel_id: WWAN logical channel ID
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_connect(u32 producer_hdl,
+ u32 consumer_hdl,
+ int wwan_logical_channel_id)
+{
+ int ret = 0;
+
+ if (true == rmnet_bridge_cb.is_connected) {
+ ret = 0;
+ pr_err("%s: trying to connect already connected RmNet bridge\n",
+ __func__);
+ goto bail;
+ }
+
+ rmnet_bridge_cb.consumer_handle = consumer_hdl;
+ rmnet_bridge_cb.producer_handle = producer_hdl;
+ rmnet_bridge_cb.is_connected = true;
+
+ ret = ipa_bridge_setup(IPA_DL);
+ if (ret) {
+ pr_err("%s: IPA DL bridge setup failure\n", __func__);
+ goto bail_dl;
+ }
+ ret = ipa_bridge_setup(IPA_UL);
+ if (ret) {
+ pr_err("%s: IPA UL bridge setup failure\n", __func__);
+ goto bail_ul;
+ }
+ return 0;
+bail_ul:
+ ipa_bridge_teardown(IPA_DL);
+bail_dl:
+ rmnet_bridge_cb.is_connected = false;
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_connect);
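+
+/*
+* Example usage (illustrative sketch; the handles and the logical channel
+* id are assumptions):
+*
+*    rmnet_bridge_init();
+*    if (rmnet_bridge_connect(prod_hdl, cons_hdl, 8))
+*            ... handle the error ...
+*
+*    rmnet_bridge_disconnect();
+*/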
+
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle)
+{
+ if (producer_handle == NULL || consumer_handle == NULL)
+ return;
+
+ *producer_handle = rmnet_bridge_cb.producer_handle;
+ *consumer_handle = rmnet_bridge_cb.consumer_handle;
+}
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index cb6b23e..8dbdfa3 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -291,6 +291,7 @@
u8 active_path;
int recent_reported_soc;
int battery_less_hardware;
+ int ibatmax_max_adj_ma;
};
/* user space parameter to limit usb current */
@@ -635,10 +636,26 @@
}
#define PM8921_CHG_IBATMAX_MIN 325
-#define PM8921_CHG_IBATMAX_MAX 2000
+#define PM8921_CHG_IBATMAX_MAX 3025
#define PM8921_CHG_I_MIN_MA 225
#define PM8921_CHG_I_STEP_MA 50
#define PM8921_CHG_I_MASK 0x3F
+static int pm_chg_ibatmax_get(struct pm8921_chg_chip *chip, int *ibat_ma)
+{
+ u8 temp;
+ int rc;
+
+ rc = pm8xxx_readb(chip->dev->parent, CHG_IBAT_MAX, &temp);
+ if (rc) {
+ pr_err("rc = %d while reading ibat max\n", rc);
+ *ibat_ma = 0;
+ return rc;
+ }
+ *ibat_ma = (int)(temp & PM8921_CHG_I_MASK) * PM8921_CHG_I_STEP_MA
+ + PM8921_CHG_I_MIN_MA;
+ return 0;
+}
+
static int pm_chg_ibatmax_set(struct pm8921_chg_chip *chip, int chg_current)
{
u8 temp;
@@ -2891,6 +2908,30 @@
return IRQ_HANDLED;
}
+struct ibatmax_max_adj_entry {
+ int ibat_max_ma;
+ int max_adj_ma;
+};
+
+static struct ibatmax_max_adj_entry ibatmax_adj_table[] = {
+ {975, 300},
+ {1475, 150},
+ {1975, 200},
+ {2475, 250},
+};
+
+static int find_ibat_max_adj_ma(int ibat_target_ma)
+{
+ int i = 0;
+
+ for (i = ARRAY_SIZE(ibatmax_adj_table) - 1; i >= 0; i--) {
+ if (ibat_target_ma <= ibatmax_adj_table[i].ibat_max_ma)
+ break;
+ }
+
+ return ibatmax_adj_table[i].max_adj_ma;
+}
+
static irqreturn_t fastchg_irq_handler(int irq, void *data)
{
struct pm8921_chg_chip *chip = data;
@@ -4207,6 +4248,81 @@
}
DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
+static int reg_loop;
+#define MAX_REG_LOOP_CHAR 10
+static int get_reg_loop_param(char *buf, struct kernel_param *kp)
+{
+ u8 temp;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ temp = pm_chg_get_regulation_loop(the_chip);
+ return snprintf(buf, MAX_REG_LOOP_CHAR, "%d", temp);
+}
+module_param_call(reg_loop, NULL, get_reg_loop_param,
+ &reg_loop, 0644);
+
+static int max_chg_ma;
+#define MAX_MA_CHAR 10
+static int get_max_chg_ma_param(char *buf, struct kernel_param *kp)
+{
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ return snprintf(buf, MAX_MA_CHAR, "%d", the_chip->max_bat_chg_current);
+}
+module_param_call(max_chg_ma, NULL, get_max_chg_ma_param,
+ &max_chg_ma, 0644);
+static int ibatmax_ma;
+static int set_ibat_max(const char *val, struct kernel_param *kp)
+{
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("error setting value %d\n", rc);
+ return rc;
+ }
+
+ if (abs(ibatmax_ma - the_chip->max_bat_chg_current)
+ <= the_chip->ibatmax_max_adj_ma) {
+ rc = pm_chg_ibatmax_set(the_chip, ibatmax_ma);
+ if (rc) {
+ pr_err("Failed to set ibatmax rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+static int get_ibat_max(char *buf, struct kernel_param *kp)
+{
+ int ibat_ma;
+ int rc;
+
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+
+ rc = pm_chg_ibatmax_get(the_chip, &ibat_ma);
+ if (rc) {
+ pr_err("ibatmax_get error = %d\n", rc);
+ return rc;
+ }
+
+ return snprintf(buf, MAX_MA_CHAR, "%d", ibat_ma);
+}
+module_param_call(ibatmax_ma, set_ibat_max, get_ibat_max,
+ &ibatmax_ma, 0644);
enum {
BAT_WARM_ZONE,
BAT_COOL_ZONE,
@@ -4445,6 +4561,9 @@
if (chip->battery_less_hardware)
charging_disabled = 1;
+ chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
+ chip->max_bat_chg_current);
+
rc = pm8921_chg_hw_init(chip);
if (rc) {
pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/smb137c-charger.c b/drivers/power/smb137c-charger.c
index b865bd7..9cdf5b5 100644
--- a/drivers/power/smb137c-charger.c
+++ b/drivers/power/smb137c-charger.c
@@ -992,29 +992,47 @@
{
struct smb137c_chip *chip = container_of(psy, struct smb137c_chip, psy);
union power_supply_propval prop = {0,};
+ int scope = POWER_SUPPLY_SCOPE_DEVICE;
+ int current_limit = USB_MIN_CURRENT_UA;
+ int online = 0;
+ int rc;
mutex_lock(&chip->lock);
dev_dbg(&chip->client->dev, "%s: start\n", __func__);
- chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_ONLINE,
- &prop);
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB online property, rc=%d\n",
+ __func__, rc);
+ else
+ online = prop.intval;
- if (prop.intval) {
- /* USB online */
- chip->usb_psy->get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_SCOPE, &prop);
- if (prop.intval == POWER_SUPPLY_SCOPE_SYSTEM) {
- /* USB host mode */
- smb137c_enable_otg_mode(chip);
- smb137c_disable_charging(chip);
- } else {
- /* USB device mode */
- chip->usb_psy->get_property(chip->usb_psy,
+ rc = chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_SCOPE,
+ &prop);
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB scope property, rc=%d\n",
+ __func__, rc);
+ else
+ scope = prop.intval;
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
- smb137c_set_usb_input_current_limit(chip, prop.intval);
- smb137c_enable_charging(chip);
- smb137c_disable_otg_mode(chip);
- }
+ if (rc)
+ dev_err(&chip->client->dev, "%s: could not read USB current_max property, rc=%d\n",
+ __func__, rc);
+ else
+ current_limit = prop.intval;
+
+ if (scope == POWER_SUPPLY_SCOPE_SYSTEM) {
+ /* USB host mode */
+ smb137c_disable_charging(chip);
+ smb137c_enable_otg_mode(chip);
+ } else if (online) {
+ /* USB online in device mode */
+ smb137c_set_usb_input_current_limit(chip, current_limit);
+ smb137c_enable_charging(chip);
+ smb137c_disable_otg_mode(chip);
} else {
/* USB offline */
smb137c_disable_charging(chip);
@@ -1318,7 +1336,6 @@
};
MODULE_DEVICE_TABLE(i2c, smb137c_id);
-/* TODO: should this be "summit,smb137c-charger"? */
static const struct of_device_id smb137c_match[] = {
{ .compatible = "summit,smb137c", },
{ },
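The rework above splits the three USB property reads apart, applies safe
defaults when a read fails, and makes the mode decision explicit:

    scope             online   action
    ----------------  -------  ------------------------------------------
    SYSTEM (host)     any      disable charging, enable OTG
    DEVICE            1        set input current limit, enable charging
    DEVICE            0        treat as USB offline, disable charging

This also changes behaviour: host mode is now honoured even when the online
read fails or reports 0, whereas the old code only checked scope while online.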
diff --git a/drivers/power/smb350_charger.c b/drivers/power/smb350_charger.c
index dc0c4bd..21d7aea 100644
--- a/drivers/power/smb350_charger.c
+++ b/drivers/power/smb350_charger.c
@@ -229,20 +229,18 @@
return power_ok;
}
-static bool smb350_is_charging(struct i2c_client *client)
+static bool smb350_is_charger_present(struct i2c_client *client)
{
int val;
- bool is_charging;
+ /* Normally the device is non-removable and embedded on the board.
+ * Verify that the charger is present by getting an I2C response.
+ */
val = smb350_read_reg(client, STATUS_B_REG);
if (val < 0)
return false;
- val = (val >> 1) & 0x3;
-
- is_charging = (val != 0);
-
- return is_charging;
+ return true;
}
static int smb350_get_prop_charge_type(struct smb350_device *dev)
@@ -408,10 +406,10 @@
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = smb350_is_dc_present(client);
+ val->intval = smb350_is_charger_present(client);
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = smb350_is_charging(client);
+ val->intval = smb350_is_dc_present(client);
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
val->intval = smb350_get_prop_charge_type(dev);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 57cde45..0ebb944 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1897,6 +1897,8 @@
if (rdev->desc->ops->list_voltage)
selector = rdev->desc->ops->list_voltage(rdev,
selector);
+ else if (rdev->desc->ops->get_voltage)
+ selector = rdev->desc->ops->get_voltage(rdev);
else
selector = -1;
} else if (rdev->desc->ops->set_voltage_sel) {
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 0549593..a330f1b 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -550,11 +550,12 @@
}
static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
- int min_uV, int max_uV, int *range_sel, int *voltage_sel)
+ int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+ unsigned *selector)
{
struct qpnp_voltage_range *range;
int uV = min_uV;
- int lim_min_uV, lim_max_uV, i;
+ int lim_min_uV, lim_max_uV, i, range_id;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -575,7 +576,8 @@
for (i = vreg->set_points->count - 1; i > 0; i--)
if (uV > vreg->set_points->range[i - 1].max_uV)
break;
- range = &vreg->set_points->range[i];
+ range_id = i;
+ range = &vreg->set_points->range[range_id];
*range_sel = range->range_sel;
/*
@@ -594,6 +596,11 @@
return -EINVAL;
}
+ *selector = 0;
+ for (i = 0; i < range_id; i++)
+ *selector += vreg->set_points->range[i].n_voltages;
+ *selector += (uV - range->set_point_min_uV) / range->step_uV;
+
return 0;
}
@@ -605,7 +612,7 @@
u8 buf[2];
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
@@ -669,7 +676,7 @@
int rc, range_sel, voltage_sel;
rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &voltage_sel);
+ &voltage_sel, selector);
if (rc) {
vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
return rc;
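A worked sketch of the selector arithmetic introduced above: the selector is a
flat index across all ranges, counting every set point of the lower ranges
first, then the step offset inside the chosen range. The range data below is
invented for illustration; only the arithmetic mirrors
qpnp_regulator_select_voltage().

    #include <stdio.h>

    struct range { int set_point_min_uV; int step_uV; int n_voltages; };

    /* hypothetical two-range regulator */
    static const struct range ranges[] = {
        {  750000, 12500, 64 },   /* range 0: 0.75 V .. 1.5375 V */
        { 1550000, 50000, 14 },   /* range 1: 1.55 V .. 2.2 V */
    };

    static unsigned selector_for(int range_id, int uV)
    {
        unsigned sel = 0;
        int i;

        for (i = 0; i < range_id; i++)
            sel += ranges[i].n_voltages;        /* skip lower ranges */
        sel += (uV - ranges[range_id].set_point_min_uV)
                / ranges[range_id].step_uV;     /* offset within range */
        return sel;
    }

    int main(void)
    {
        /* 1.6 V is in range 1: 64 + (1600000 - 1550000) / 50000 = 65 */
        printf("%u\n", selector_for(1, 1600000));
        return 0;
    }

Reporting this flat index through the new unsigned *selector out-parameter
keeps set-voltage results consistent with the regulator core's list_voltage()
enumeration.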
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index c26da60..0497a32 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,17 +30,18 @@
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
-#include <mach/msm_spi.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <mach/dma.h>
-#include <asm/atomic.h>
-#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <mach/msm_spi.h>
+#include <mach/sps.h>
+#include <mach/dma.h>
#include "spi_qsd.h"
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
@@ -211,16 +212,19 @@
&dd->output_block_size, block, mult)) {
goto fifo_size_err;
}
- /* DM mode is not available for this block size */
- if (dd->input_block_size == 4 || dd->output_block_size == 4)
- dd->use_dma = 0;
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ /* DM mode is not available for this block size */
+ if (dd->input_block_size == 4 || dd->output_block_size == 4)
+ dd->use_dma = 0;
- /* DM mode is currently unsupported for different block sizes */
- if (dd->input_block_size != dd->output_block_size)
- dd->use_dma = 0;
+ /* DM mode is currently unsupported for different block sizes */
+ if (dd->input_block_size != dd->output_block_size)
+ dd->use_dma = 0;
- if (dd->use_dma)
- dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
+ if (dd->use_dma)
+ dd->burst_size = max(dd->input_block_size,
+ DM_BURST_SIZE);
+ }
return;
@@ -352,14 +356,19 @@
return 0;
}
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N and no-input/no-output flags
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
if (n != (*config & SPI_CFG_N))
*config = (*config & ~SPI_CFG_N) | n;
- if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
+ if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
+ || (dd->mode == SPI_BAM_MODE)) {
if (dd->read_buf == NULL)
*config |= SPI_NO_INPUT;
if (dd->write_buf == NULL)
@@ -367,23 +376,207 @@
}
}
-static void msm_spi_set_config(struct msm_spi *dd, int bpw)
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculated value for SPI_CONFIG
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
- u32 spi_config;
-
- spi_config = readl_relaxed(dd->base + SPI_CONFIG);
-
- if (dd->cur_msg->spi->mode & SPI_CPHA)
- spi_config &= ~SPI_CFG_INPUT_FIRST;
- else
- spi_config |= SPI_CFG_INPUT_FIRST;
- if (dd->cur_msg->spi->mode & SPI_LOOP)
+ if (mode & SPI_LOOP)
spi_config |= SPI_CFG_LOOPBACK;
else
spi_config &= ~SPI_CFG_LOOPBACK;
- msm_spi_add_configs(dd, &spi_config, bpw-1);
+
+ if (mode & SPI_CPHA)
+ spi_config &= ~SPI_CFG_INPUT_FIRST;
+ else
+ spi_config |= SPI_CFG_INPUT_FIRST;
+
+ return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+ u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+ spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+ spi_config, dd->cur_msg->spi->mode);
+
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+ /* flags removed from SPI_CONFIG in QUP version-2 */
+ msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+ else if (dd->mode == SPI_BAM_MODE)
+ spi_config |= SPI_CFG_INPUT_FIRST;
+
writel_relaxed(spi_config, dd->base + SPI_CONFIG);
- msm_spi_set_qup_config(dd, bpw);
+}
+
+/**
+ * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and SPI_MX_WRITE_COUNT
+ * for FIFO-mode. Set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
+ * BAM and DMOV modes.
+ * @n_words The number of reads/writes of size N.
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+ /*
+ * n_words cannot exceed fifo_size, and only one READ COUNT
+ * interrupt is generated per transaction, so for transactions
+ * larger than fifo size READ COUNT must be disabled.
+ * For those transactions we usually move to Data Mover mode.
+ */
+ if (dd->mode == SPI_FIFO_MODE) {
+ if (n_words <= dd->input_fifo_size) {
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, n_words);
+ } else {
+ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, 0);
+ }
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+ /* must be zero for FIFO */
+ writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+ writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+ }
+ } else {
+ /* must be zero for BAM and DMOV */
+ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, 0);
+
+ /*
+ * for DMA transfers, both QUP_MX_INPUT_COUNT and
+ * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
+ * That case is a non-balanced transfer when there is
+ * only a read_buf.
+ */
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+ if (dd->write_buf)
+ writel_relaxed(0,
+ dd->base + SPI_MX_INPUT_COUNT);
+ else
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_INPUT_COUNT);
+
+ writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+ }
+ }
+}
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ * @brief BAM can transfer at most SPI_MAX_TRFR_BTWN_RESETS bytes in a
+ * single transfer. Between transfers QUP must change to reset state. A
+ * loop issues a single BAM transfer at a time. If another transfer is
+ * required, it waits for the current transfer to finish, then moves to
+ * reset state and back to run state to issue the next transfer.
+ * The function does not wait for the last transfer to end; likewise, if
+ * only a single transfer is required, the function does not wait for it
+ * to end.
+ * @timeout max time in jiffies to wait for a transfer to finish.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
+{
+ u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
+ int ret;
+ /*
+ * QUP must move to reset mode every 64K-1 bytes of transfer
+ * (counter is 16 bit)
+ */
+ if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
+ /* assert chip select unconditionally */
+ u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+ if (!(spi_ioc & SPI_IO_C_FORCE_CS))
+ writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
+ dd->base + SPI_IO_CONTROL);
+ }
+
+ /* Following flags are required since we are waiting on all transfers */
+ cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+ /*
+ * on a balanced transaction, BAM will set the flags on the producer
+ * pipe based on the flags set on the consumer pipe
+ */
+ prod_flags = (dd->write_buf) ? 0 : cons_flags;
+
+ while (dd->tx_bytes_remaining > 0) {
+ bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
+ bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
+ SPI_MAX_TRFR_BTWN_RESETS);
+ n_words_xfr = DIV_ROUND_UP(bytes_to_send,
+ dd->bytes_per_word);
+
+ msm_spi_set_mx_counts(dd, n_words_xfr);
+
+ ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to set QUP state to run",
+ __func__);
+ goto xfr_err;
+ }
+
+ /* enqueue read buffer in BAM */
+ if (dd->read_buf) {
+ ret = sps_transfer_one(dd->bam.prod.handle,
+ dd->cur_transfer->rx_dma + bytes_sent,
+ bytes_to_send, dd, prod_flags);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to queue producer BAM transfer",
+ __func__);
+ goto xfr_err;
+ }
+ }
+
+ /* enqueue write buffer in BAM */
+ if (dd->write_buf) {
+ ret = sps_transfer_one(dd->bam.cons.handle,
+ dd->cur_transfer->tx_dma + bytes_sent,
+ bytes_to_send, dd, cons_flags);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to queue consumer BAM transfer",
+ __func__);
+ goto xfr_err;
+ }
+ }
+
+ dd->tx_bytes_remaining -= bytes_to_send;
+
+ /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
+ if (dd->tx_bytes_remaining > 0) {
+ if (!wait_for_completion_timeout(
+ &dd->transfer_complete, timeout)) {
+ dev_err(dd->dev,
+ "%s: SPI transaction timeout",
+ __func__);
+ dd->cur_msg->status = -EIO;
+ ret = -EIO;
+ goto xfr_err;
+ }
+ ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+ if (ret < 0) {
+ dev_err(dd->dev,
+ "%s: Failed to set QUP state to reset",
+ __func__);
+ goto xfr_err;
+ }
+ init_completion(&dd->transfer_complete);
+ }
+ }
+ return 0;
+
+xfr_err:
+ return ret;
}
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
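Stripped of the register writes, the BAM loop above is plain chunking with a
QUP reset between chunks. A minimal standalone sketch of the control flow
(helper prints stand in for the sps_transfer_one()/msm_spi_set_state() calls):

    #include <stdio.h>

    /* QUP byte counters are 16-bit, hence (64K - 16) per chunk */
    #define MAX_CHUNK ((64u * 1024u) - 16u)

    int main(void)
    {
        unsigned total = 200000, remaining = total;

        while (remaining > 0) {
            unsigned sent  = total - remaining;
            unsigned chunk = remaining < MAX_CHUNK ? remaining : MAX_CHUNK;

            printf("queue rx/tx descriptors at offset %u, len %u\n",
                   sent, chunk);
            remaining -= chunk;

            if (remaining > 0)
                printf("wait for EOT, QUP to RESET, back to RUN\n");
        }
        printf("final chunk left pending for the caller\n");
        return 0;
    }

The last chunk is deliberately not waited on, so msm_spi_process_transfer()
can block on transfer_complete exactly once for both the single-chunk and
multi-chunk cases.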
@@ -767,7 +960,15 @@
return IRQ_HANDLED;
}
-static int msm_spi_map_dma_buffers(struct msm_spi *dd)
+/**
+ * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
+ * @return zero on success or negative error code
+ *
+ * calls dma_map_single() on the read/write buffers, effectively invalidating
+ * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
+ * buffer and copies the data to/from the client buffers
+ */
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
struct device *dev;
struct spi_transfer *first_xfr;
@@ -847,7 +1048,7 @@
return ret;
}
-static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
+static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
struct device *dev;
u32 offset;
@@ -914,56 +1115,190 @@
}
}
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+ struct device *dev;
+
+ /* mapped by client */
+ if (dd->cur_msg->is_dma_mapped)
+ return;
+
+ dev = &dd->cur_msg->spi->dev;
+ if (dd->cur_transfer->rx_buf)
+ dma_unmap_single(dev, dd->cur_transfer->rx_dma,
+ dd->cur_transfer->len,
+ DMA_FROM_DEVICE);
+
+ if (dd->cur_transfer->tx_buf)
+ dma_unmap_single(dev, dd->cur_transfer->tx_dma,
+ dd->cur_transfer->len,
+ DMA_TO_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+ if (dd->mode == SPI_DMOV_MODE)
+ msm_spi_dmov_unmap_buffers(dd);
+ else if (dd->mode == SPI_BAM_MODE)
+ msm_spi_bam_unmap_buffers(dd);
+}
+
/**
- * msm_use_dm - decides whether to use data mover for this
- * transfer
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
* @dd: device
* @tr: transfer
*
- * Start using DM if:
- * 1. Transfer is longer than 3*block size.
- * 2. Buffers should be aligned to cache line.
- * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
+ * Start using DMA if:
+ * 1. Is supported by HW
+ * 2. Is not disabled by platform data
+ * 3. Transfer size is greater than 3*block size.
+ * 4. Buffers are aligned to cache line.
+ * 5. Bytes-per-word is 8, 16, or 32.
*/
-static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
- u8 bpw)
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
- u32 cache_line = dma_get_cache_alignment();
-
if (!dd->use_dma)
- return 0;
+ return false;
+
+ /* check constraints from platform data */
+ if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+ return false;
if (dd->cur_msg_len < 3*dd->input_block_size)
- return 0;
+ return false;
if (dd->multi_xfr && !dd->read_len && !dd->write_len)
- return 0;
+ return false;
- if (tr->tx_buf) {
- if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
- return 0;
- }
- if (tr->rx_buf) {
- if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
- return 0;
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ u32 cache_line = dma_get_cache_alignment();
+
+ if (tr->tx_buf) {
+ if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+ return false;
+ }
+ if (tr->rx_buf) {
+ if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+ return false;
+ }
+
+ if (tr->cs_change &&
+ ((bpw != 8) && (bpw != 16) && (bpw != 32)))
+ return false;
}
- if (tr->cs_change &&
- ((bpw != 8) || (bpw != 16) || (bpw != 32)))
- return 0;
- return 1;
+ return true;
+}
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+ if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+ if (dd->qup_ver) {
+ dd->mode = SPI_BAM_MODE;
+ } else {
+ dd->mode = SPI_DMOV_MODE;
+ if (dd->write_len && dd->read_len) {
+ dd->tx_bytes_remaining = dd->write_len;
+ dd->rx_bytes_remaining = dd->read_len;
+ }
+ }
+ } else {
+ dd->mode = SPI_FIFO_MODE;
+ if (dd->multi_xfr) {
+ dd->read_len = dd->cur_transfer->len;
+ dd->write_len = dd->cur_transfer->len;
+ }
+ }
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+ u32 spi_iom;
+ spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+ /* Set input and output transfer mode: FIFO, DMOV, or BAM */
+ spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+ spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
+ spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
+ /* Turn on packing for data mover */
+ if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
+ spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+ else
+ spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+
+ /*if (dd->mode == SPI_BAM_MODE) {
+ spi_iom |= SPI_IO_C_NO_TRI_STATE;
+ spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
+ }*/
+ writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+ if (mode & SPI_CPOL)
+ spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+ return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+ u32 spi_ioc, spi_ioc_orig, chip_select;
+
+ spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+ spi_ioc_orig = spi_ioc;
+ spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc,
+ dd->cur_msg->spi->mode);
+ /* Set chip-select */
+ chip_select = dd->cur_msg->spi->chip_select << 2;
+ if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+ spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+ if (!dd->cur_transfer->cs_change)
+ spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+ if (spi_ioc != spi_ioc_orig)
+ writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+ return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+ /* mask INPUT and OUTPUT service flags to prevent IRQs on FIFO status
+ * change in BAM mode */
+ u32 mask = (dd->mode == SPI_BAM_MODE) ?
+ QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
+ : 0;
+ writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}
static void msm_spi_process_transfer(struct msm_spi *dd)
{
u8 bpw;
- u32 spi_ioc;
- u32 spi_iom;
- u32 spi_ioc_orig;
u32 max_speed;
- u32 chip_select;
u32 read_count;
u32 timeout;
+ u32 spi_ioc;
u32 int_loopback = 0;
dd->tx_bytes_remaining = dd->cur_msg_len;
@@ -987,6 +1322,10 @@
if (!dd->clock_speed || max_speed != dd->clock_speed)
msm_spi_clock_set(dd, max_speed);
+ timeout = 100 * msecs_to_jiffies(
+ DIV_ROUND_UP(dd->cur_msg_len * 8,
+ DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
if (dd->cur_msg->spi->mode & SPI_LOOP)
int_loopback = 1;
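As a sanity check of the timeout formula moved up above, take a 1000-byte
message at max_speed = 1 MHz (one bit per clock assumed):

    bits       = 1000 * 8                             =  8000
    clk_per_ms = DIV_ROUND_UP(1000000, MSEC_PER_SEC)  =  1000
    ideal_ms   = DIV_ROUND_UP(8000, 1000)             =     8
    timeout    = 100 * msecs_to_jiffies(8)     /* 800 ms of jiffies */

so the driver allows roughly 100x the theoretical wire time before declaring
-EIO, and computing it up front lets the same budget serve both the BAM loop
and the FIFO/DMOV completion wait.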
@@ -1004,60 +1343,24 @@
__func__);
return;
}
- if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
- dd->mode = SPI_FIFO_MODE;
- if (dd->multi_xfr) {
- dd->read_len = dd->cur_transfer->len;
- dd->write_len = dd->cur_transfer->len;
- }
- /* read_count cannot exceed fifo_size, and only one READ COUNT
- interrupt is generated per transaction, so for transactions
- larger than fifo size READ COUNT must be disabled.
- For those transactions we usually move to Data Mover mode.
- */
- if (read_count <= dd->input_fifo_size) {
- writel_relaxed(read_count,
- dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, read_count);
- } else {
- writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
- msm_spi_set_write_count(dd, 0);
- }
- } else {
- dd->mode = SPI_DMOV_MODE;
- if (dd->write_len && dd->read_len) {
- dd->tx_bytes_remaining = dd->write_len;
- dd->rx_bytes_remaining = dd->read_len;
- }
- }
- /* Write mode - fifo or data mover*/
- spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
- spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
- spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
- spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
- /* Turn on packing for data mover */
- if (dd->mode == SPI_DMOV_MODE)
- spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
- else
- spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
- writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+ if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+ dev_err(dd->dev,
+ "%s: Error setting QUP to reset-state",
+ __func__);
- msm_spi_set_config(dd, bpw);
-
- spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
- spi_ioc_orig = spi_ioc;
- if (dd->cur_msg->spi->mode & SPI_CPOL)
- spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
- else
- spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
- chip_select = dd->cur_msg->spi->chip_select << 2;
- if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
- spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
- if (!dd->cur_transfer->cs_change)
- spi_ioc |= SPI_IO_C_MX_CS_MODE;
- if (spi_ioc != spi_ioc_orig)
- writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+ msm_spi_set_transfer_mode(dd, bpw, read_count);
+ msm_spi_set_mx_counts(dd, read_count);
+ if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
+ if (msm_spi_dma_map_buffers(dd) < 0) {
+ pr_err("Mapping DMA buffers\n");
+ return;
+ }
+ msm_spi_set_qup_io_modes(dd);
+ msm_spi_set_spi_config(dd, bpw);
+ msm_spi_set_qup_config(dd, bpw);
+ spi_ioc = msm_spi_set_spi_io_control(dd);
+ msm_spi_set_qup_op_mask(dd);
if (dd->mode == SPI_DMOV_MODE) {
msm_spi_setup_dm_transfer(dd);
@@ -1071,27 +1374,35 @@
if (msm_spi_prepare_for_write(dd))
goto transfer_end;
msm_spi_start_write(dd, read_count);
+ } else if (dd->mode == SPI_BAM_MODE) {
+ if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
+ dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+ __func__);
}
- /* Only enter the RUN state after the first word is written into
- the output FIFO. Otherwise, the output FIFO EMPTY interrupt
- might fire before the first word is written resulting in a
- possible race condition.
+ /*
+ * In BAM mode, the current state at this point is already RUN.
+ * Only enter the RUN state after the first word is written into
+ * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+ * might fire before the first word is written resulting in a
+ * possible race condition.
*/
- if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
- goto transfer_end;
-
- timeout = 100 * msecs_to_jiffies(
- DIV_ROUND_UP(dd->cur_msg_len * 8,
- DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+ if (dd->mode != SPI_BAM_MODE)
+ if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+ dev_warn(dd->dev,
+ "%s: Failed to set QUP to run-state. Mode:%d",
+ __func__, dd->mode);
+ goto transfer_end;
+ }
/* Assume success, this might change later upon transaction result */
dd->cur_msg->status = 0;
do {
if (!wait_for_completion_timeout(&dd->transfer_complete,
timeout)) {
- dev_err(dd->dev, "%s: SPI transaction "
- "timeout\n", __func__);
+ dev_err(dd->dev,
+ "%s: SPI transaction timeout\n",
+ __func__);
dd->cur_msg->status = -EIO;
if (dd->mode == SPI_DMOV_MODE) {
msm_dmov_flush(dd->tx_dma_chan, 1);
@@ -1102,8 +1413,7 @@
} while (msm_spi_dm_send_next(dd));
transfer_end:
- if (dd->mode == SPI_DMOV_MODE)
- msm_spi_unmap_dma_buffers(dd);
+ msm_spi_dma_unmap_buffers(dd);
dd->mode = SPI_MODE_NONE;
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
@@ -1266,10 +1576,10 @@
* WR-WR or WR-RD transfers
*/
if ((!dd->cur_msg->is_dma_mapped) &&
- (msm_use_dm(dd, dd->cur_transfer,
+ (msm_spi_use_dma(dd, dd->cur_transfer,
dd->cur_transfer->bits_per_word))) {
/* Mapping of DMA buffers */
- int ret = msm_spi_map_dma_buffers(dd);
+ int ret = msm_spi_dma_map_buffers(dd);
if (ret < 0) {
dd->cur_msg->status = ret;
goto error;
@@ -1474,22 +1784,13 @@
spi_ioc |= mask;
else
spi_ioc &= ~mask;
- if (spi->mode & SPI_CPOL)
- spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
- else
- spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+ spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
spi_config = readl_relaxed(dd->base + SPI_CONFIG);
- if (spi->mode & SPI_LOOP)
- spi_config |= SPI_CFG_LOOPBACK;
- else
- spi_config &= ~SPI_CFG_LOOPBACK;
- if (spi->mode & SPI_CPHA)
- spi_config &= ~SPI_CFG_INPUT_FIRST;
- else
- spi_config |= SPI_CFG_INPUT_FIRST;
+ spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+ spi_config, spi->mode);
writel_relaxed(spi_config, dd->base + SPI_CONFIG);
/* Ensure previous write completed before disabling the clocks */
@@ -1730,7 +2031,7 @@
roundup(dd->burst_size, cache_line))*2;
}
-static void msm_spi_teardown_dma(struct msm_spi *dd)
+static void msm_spi_dmov_teardown(struct msm_spi *dd)
{
int limit = 0;
@@ -1749,7 +2050,171 @@
dd->tx_padding = dd->rx_padding = NULL;
}
-static __init int msm_spi_init_dma(struct msm_spi *dd)
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+ enum msm_spi_pipe_direction pipe_dir)
+{
+ struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+ (&dd->bam.prod) : (&dd->bam.cons);
+ if (!pipe->teardown_required)
+ return;
+
+ sps_disconnect(pipe->handle);
+ dma_free_coherent(dd->dev, pipe->config.desc.size,
+ pipe->config.desc.base, pipe->config.desc.phys_base);
+ sps_free_endpoint(pipe->handle);
+ pipe->handle = 0;
+ pipe->teardown_required = false;
+}
+
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+ enum msm_spi_pipe_direction pipe_dir)
+{
+ int rc = 0;
+ struct sps_pipe *pipe_handle;
+ struct sps_register_event event = {0};
+ struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+ (&dd->bam.prod) : (&dd->bam.cons);
+ struct sps_connect *pipe_conf = &pipe->config;
+
+ pipe->handle = 0;
+ pipe_handle = sps_alloc_endpoint();
+ if (!pipe_handle) {
+ dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+ , __func__);
+ return -ENOMEM;
+ }
+
+ memset(pipe_conf, 0, sizeof(*pipe_conf));
+ rc = sps_get_config(pipe_handle, pipe_conf);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+ , __func__);
+ goto config_err;
+ }
+
+ if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+ pipe_conf->source = dd->bam.handle;
+ pipe_conf->destination = SPS_DEV_HANDLE_MEM;
+ pipe_conf->mode = SPS_MODE_SRC;
+ pipe_conf->src_pipe_index =
+ dd->pdata->bam_producer_pipe_index;
+ pipe_conf->dest_pipe_index = 0;
+ } else {
+ pipe_conf->source = SPS_DEV_HANDLE_MEM;
+ pipe_conf->destination = dd->bam.handle;
+ pipe_conf->mode = SPS_MODE_DEST;
+ pipe_conf->src_pipe_index = 0;
+ pipe_conf->dest_pipe_index =
+ dd->pdata->bam_consumer_pipe_index;
+ }
+ pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+ pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+ pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
+ pipe_conf->desc.size,
+ &pipe_conf->desc.phys_base,
+ GFP_KERNEL);
+ if (!pipe_conf->desc.base) {
+ dev_err(dd->dev, "%s: Failed allocate BAM pipe memory"
+ , __func__);
+ rc = -ENOMEM;
+ goto config_err;
+ }
+
+ memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
+
+ rc = sps_connect(pipe_handle, pipe_conf);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
+ goto connect_err;
+ }
+
+ event.mode = SPS_TRIGGER_WAIT;
+ event.options = SPS_O_EOT;
+ event.xfer_done = &dd->transfer_complete;
+ event.user = (void *)dd;
+ rc = sps_register_event(pipe_handle, &event);
+ if (rc) {
+ dev_err(dd->dev, "%s: Failed to register BAM EOT event",
+ __func__);
+ goto register_err;
+ }
+
+ pipe->handle = pipe_handle;
+ pipe->teardown_required = true;
+ return 0;
+
+register_err:
+ sps_disconnect(pipe_handle);
+connect_err:
+ dma_free_coherent(dd->dev, pipe_conf->desc.size,
+ pipe_conf->desc.base, pipe_conf->desc.phys_base);
+config_err:
+ sps_free_endpoint(pipe_handle);
+
+ return rc;
+}
+
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+ msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+ msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+ if (dd->bam.deregister_required) {
+ sps_deregister_bam_device(dd->bam.handle);
+ dd->bam.deregister_required = false;
+ }
+}
+
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+ struct sps_bam_props bam_props = {0};
+ u32 bam_handle;
+ int rc = 0;
+
+ rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+ if (rc || !bam_handle) {
+ bam_props.phys_addr = dd->bam.phys_addr;
+ bam_props.virt_addr = dd->bam.base;
+ bam_props.irq = dd->bam.irq;
+ bam_props.manage = SPS_BAM_MGR_LOCAL;
+ bam_props.summing_threshold = 0x10;
+
+ rc = sps_register_bam_device(&bam_props, &bam_handle);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to register BAM device",
+ __func__);
+ return rc;
+ }
+ dd->bam.deregister_required = true;
+ }
+
+ dd->bam.handle = bam_handle;
+
+ rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to init producer BAM-pipe",
+ __func__);
+ goto bam_init_error;
+ }
+
+ rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+ if (rc) {
+ dev_err(dd->dev,
+ "%s: Failed to init consumer BAM-pipe",
+ __func__);
+ goto bam_init_error;
+ }
+
+ return 0;
+
+bam_init_error:
+ msm_spi_bam_teardown(dd);
+ return rc;
+}
+
+static __init int msm_spi_dmov_init(struct msm_spi *dd)
{
dmov_box *box;
u32 cache_line = dma_get_cache_alignment();
@@ -1811,10 +2276,15 @@
return 0;
}
-struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
+/**
+ * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
+ */
+struct msm_spi_platform_data *
+__init msm_spi_dt_to_pdata(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct msm_spi_platform_data *pdata;
+ int rc;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
@@ -1827,9 +2297,76 @@
of_property_read_u32(node, "infinite_mode",
&pdata->infinite_mode);
+ pdata->ver_reg_exists = of_property_read_bool(node,
+ "qcom,ver-reg-exists");
+
+ pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
+
+ if (pdata->use_bam) {
+ rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
+ &pdata->bam_consumer_pipe_index);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+ pdata->use_bam = false;
+ }
+
+ rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
+ &pdata->bam_producer_pipe_index);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "missing qcom,bam-producer-pipe-index entry in device-tree\n");
+ pdata->use_bam = false;
+ }
+ }
return pdata;
}
+static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+ u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+ return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
+ : SPI_QUP_VERSION_NONE;
+}
+
+static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
+ struct platform_device *pdev, struct spi_master *master)
+{
+ struct resource *resource;
+ size_t bam_mem_size;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "spi_bam_physical");
+ if (!resource) {
+ dev_warn(&pdev->dev,
+ "%s: Missing spi_bam_physical entry in DT",
+ __func__);
+ return -ENXIO;
+ }
+
+ dd->bam.phys_addr = resource->start;
+ bam_mem_size = resource_size(resource);
+ dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+ bam_mem_size);
+ if (!dd->bam.base) {
+ dev_warn(&pdev->dev,
+ "%s: Failed to ioremap(spi_bam_physical)",
+ __func__);
+ return -ENXIO;
+ }
+
+ dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+ if (dd->bam.irq < 0) {
+ dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
+ __func__);
+ return -EINVAL;
+ }
+
+ dd->dma_init = msm_spi_bam_init;
+ dd->dma_teardown = msm_spi_bam_teardown;
+ return 0;
+}
+
static int __init msm_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
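For reference, a hypothetical device-tree fragment exercising the properties
parsed by msm_spi_dt_to_pdata() above (node name, compatible string and pipe
indices are invented for illustration):

    spi@f9924000 {
        compatible = "qcom,spi-qup-v2";
        qcom,ver-reg-exists;
        qcom,use-bam;
        qcom,bam-consumer-pipe-index = <12>;
        qcom,bam-producer-pipe-index = <13>;
    };

Note the fallback behaviour: if either pipe-index entry is missing, the code
warns and clears use_bam rather than failing the probe.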
@@ -1926,21 +2463,39 @@
goto skip_dma_resources;
}
}
- resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (resource) {
- dd->rx_dma_chan = resource->start;
- dd->tx_dma_chan = resource->end;
- resource = platform_get_resource(pdev, IORESOURCE_DMA,
- 1);
- if (!resource) {
- rc = -ENXIO;
- goto err_probe_res;
- }
+ if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+ resource = platform_get_resource(pdev,
+ IORESOURCE_DMA, 0);
+ if (resource) {
+ dd->rx_dma_chan = resource->start;
+ dd->tx_dma_chan = resource->end;
+ resource = platform_get_resource(pdev,
+ IORESOURCE_DMA, 1);
+ if (!resource) {
+ rc = -ENXIO;
+ goto err_probe_res;
+ }
- dd->rx_dma_crci = resource->start;
- dd->tx_dma_crci = resource->end;
+ dd->rx_dma_crci = resource->start;
+ dd->tx_dma_crci = resource->end;
+ dd->use_dma = 1;
+ master->dma_alignment =
+ dma_get_cache_alignment();
+ dd->dma_init = msm_spi_dmov_init;
+ dd->dma_teardown = msm_spi_dmov_teardown;
+ }
+ } else {
+ if (!dd->pdata->use_bam)
+ goto skip_dma_resources;
+
+ rc = msm_spi_bam_get_resources(dd, pdev, master);
+ if (rc) {
+ dev_warn(dd->dev,
+ "%s: Faild to get BAM resources",
+ __func__);
+ goto skip_dma_resources;
+ }
dd->use_dma = 1;
- master->dma_alignment = dma_get_cache_alignment();
}
}
@@ -1968,6 +2523,15 @@
goto err_probe_reqmem;
}
+ if (pdata && pdata->ver_reg_exists) {
+ enum msm_spi_qup_version ver =
+ msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+ if (dd->qup_ver != ver)
+ dev_warn(&pdev->dev,
+ "%s: HW version different then initially assumed by probe",
+ __func__);
+ }
+
if (pdata && pdata->rsl_id) {
struct remote_mutex_id rmid;
rmid.r_spinlock_id = pdata->rsl_id;
@@ -1984,7 +2548,7 @@
dd->use_rlock = 1;
dd->pm_lat = pdata->pm_lat;
pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ PM_QOS_DEFAULT_VALUE);
}
mutex_lock(&dd->core_lock);
@@ -2026,13 +2590,16 @@
}
pclk_enabled = 1;
- rc = msm_spi_configure_gsbi(dd, pdev);
- if (rc)
- goto err_probe_gsbi;
+ /* GSBI does not exist on B-family MSM chips */
+ if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+ rc = msm_spi_configure_gsbi(dd, pdev);
+ if (rc)
+ goto err_probe_gsbi;
+ }
msm_spi_calculate_fifo_size(dd);
if (dd->use_dma) {
- rc = msm_spi_init_dma(dd);
+ rc = dd->dma_init(dd);
if (rc)
goto err_probe_dma;
}
@@ -2091,7 +2658,7 @@
err_probe_reg_master:
err_probe_irq:
err_probe_state:
- msm_spi_teardown_dma(dd);
+ dd->dma_teardown(dd);
err_probe_dma:
err_probe_gsbi:
if (pclk_enabled)
@@ -2174,8 +2741,7 @@
spi_debugfs_exit(dd);
sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
- msm_spi_teardown_dma(dd);
-
+ dd->dma_teardown(dd);
clk_put(dd->clk);
clk_put(dd->pclk);
destroy_workqueue(dd->workqueue);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index a0dee34..62f1830 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -41,8 +41,13 @@
#define GSBI_CTRL_REG 0x0
#define GSBI_SPI_CONFIG 0x30
+/* B-family only registers */
#define QUP_HARDWARE_VER 0x0030
+#define QUP_HARDWARE_VER_2_1_1 0X20010001
#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG 0x200
+
#define QUP_ERROR_FLAGS 0x0308
#define SPI_CONFIG QSD_REG(0x0000) QUP_REG(0x0300)
@@ -73,6 +78,7 @@
#define SPI_NO_OUTPUT 0x00000040
#define SPI_CFG_LOOPBACK 0x00000100
#define SPI_CFG_N 0x0000001F
+#define SPI_EN_EXT_OUT_FLAG 0x00010000
/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS 0x00000800
@@ -148,8 +154,18 @@
/* Data Mover commands should be aligned to 64 bit(8 bytes) */
#define DM_BYTE_ALIGN 8
-#define SPI_QUP_VERSION_NONE 0x0
-#define SPI_QUP_VERSION_BFAM 0x2
+enum msm_spi_qup_version {
+ SPI_QUP_VERSION_NONE = 0x0,
+ SPI_QUP_VERSION_BFAM = 0x2,
+};
+
+enum msm_spi_pipe_direction {
+ SPI_BAM_CONSUMER_PIPE = 0x0,
+ SPI_BAM_PRODUCER_PIPE = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM 32
+#define SPI_MAX_TRFR_BTWN_RESETS ((64 * 1024) - 16) /* 64KB - 16 bytes */
static char const * const spi_rsrcs[] = {
"spi_clk",
@@ -231,6 +247,22 @@
};
#endif
+struct msm_spi_bam_pipe {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ bool teardown_required;
+};
+
+struct msm_spi_bam {
+ void __iomem *base;
+ u32 phys_addr;
+ u32 handle;
+ u32 irq;
+ struct msm_spi_bam_pipe prod;
+ struct msm_spi_bam_pipe cons;
+ bool deregister_required;
+};
+
struct msm_spi {
u8 *read_buf;
const u8 *write_buf;
@@ -244,8 +276,8 @@
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct completion transfer_complete;
- struct clk *clk;
- struct clk *pclk;
+ struct clk *clk; /* core clock */
+ struct clk *pclk; /* interface clock */
unsigned long mem_phys_addr;
size_t mem_size;
int input_fifo_size;
@@ -273,6 +305,9 @@
int tx_dma_crci;
int rx_dma_chan;
int rx_dma_crci;
+ int (*dma_init) (struct msm_spi *dd);
+ void (*dma_teardown) (struct msm_spi *dd);
+ struct msm_spi_bam bam;
/* Data Mover Commands */
struct spi_dmov_cmd *tx_dmov_cmd;
struct spi_dmov_cmd *rx_dmov_cmd;
@@ -321,7 +356,7 @@
int spi_gpios[ARRAY_SIZE(spi_rsrcs)];
/* SPI CS GPIOs for each slave */
struct spi_cs_gpio cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
- int qup_ver;
+ enum msm_spi_qup_version qup_ver;
int max_trfr_len;
};
@@ -333,7 +368,7 @@
enum msm_spi_state state);
static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
-static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
static inline void msm_spi_disable_irqs(struct msm_spi *dd)
@@ -385,7 +420,7 @@
static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
-static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
{
msm_spi_write_word_to_fifo(dd);
@@ -441,16 +476,18 @@
writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
}
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
-/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
{
u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
- msm_spi_add_configs(dd, &qup_config, bpw-1);
- writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
- dd->base + QUP_CONFIG);
+ msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+ writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
}
static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
@@ -482,12 +519,22 @@
static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
{
- writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+ writel_relaxed(
+ SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+ dd->base + SPI_ERROR_FLAGS_EN);
+ else
+ writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
}
static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
{
- writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+ writel_relaxed(
+ SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+ dd->base + SPI_ERROR_FLAGS);
+ else
+ writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
}
#endif
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
index ec04369..4568933 100644
--- a/drivers/thermal/pm8xxx-tm.c
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -33,29 +33,32 @@
#include <linux/msm_adc.h>
/* Register TEMP_ALARM_CTRL bits */
-#define TEMP_ALARM_CTRL_ST3_SD 0x80
-#define TEMP_ALARM_CTRL_ST2_SD 0x40
-#define TEMP_ALARM_CTRL_STATUS_MASK 0x30
-#define TEMP_ALARM_CTRL_STATUS_SHIFT 4
-#define TEMP_ALARM_CTRL_THRESH_MASK 0x0C
-#define TEMP_ALARM_CTRL_THRESH_SHIFT 2
-#define TEMP_ALARM_CTRL_OVRD_ST3 0x02
-#define TEMP_ALARM_CTRL_OVRD_ST2 0x01
-#define TEMP_ALARM_CTRL_OVRD_MASK 0x03
+#define TEMP_ALARM_CTRL_ST3_SD 0x80
+#define TEMP_ALARM_CTRL_ST2_SD 0x40
+#define TEMP_ALARM_CTRL_STATUS_MASK 0x30
+#define TEMP_ALARM_CTRL_STATUS_SHIFT 4
+#define TEMP_ALARM_CTRL_THRESH_MASK 0x0C
+#define TEMP_ALARM_CTRL_THRESH_SHIFT 2
+#define TEMP_ALARM_CTRL_OVRD_ST3 0x02
+#define TEMP_ALARM_CTRL_OVRD_ST2 0x01
+#define TEMP_ALARM_CTRL_OVRD_MASK 0x03
-#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
-#define TEMP_STAGE_HYSTERESIS 2000
+#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS 2000
-#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
-#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
/* Register TEMP_ALARM_PWM bits */
-#define TEMP_ALARM_PWM_EN_MASK 0xC0
-#define TEMP_ALARM_PWM_EN_SHIFT 6
-#define TEMP_ALARM_PWM_PER_PRE_MASK 0x38
-#define TEMP_ALARM_PWM_PER_PRE_SHIFT 3
-#define TEMP_ALARM_PWM_PER_DIV_MASK 0x07
-#define TEMP_ALARM_PWM_PER_DIV_SHIFT 0
+#define TEMP_ALARM_PWM_EN_MASK 0xC0
+#define TEMP_ALARM_PWM_EN_NEVER 0x00
+#define TEMP_ALARM_PWM_EN_SLEEP_B 0x40
+#define TEMP_ALARM_PWM_EN_PWM 0x80
+#define TEMP_ALARM_PWM_EN_ALWAYS 0xC0
+#define TEMP_ALARM_PWM_PER_PRE_MASK 0x38
+#define TEMP_ALARM_PWM_PER_PRE_SHIFT 3
+#define TEMP_ALARM_PWM_PER_DIV_MASK 0x07
+#define TEMP_ALARM_PWM_PER_DIV_SHIFT 0
/* Trips: from critical to less critical */
#define TRIP_STAGE3 0
@@ -516,16 +519,15 @@
return rc;
/*
- * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
- * helps cut down on the number of unnecessary interrupts fired when
- * changing between thermal stages. Also, Enable the over temperature
- * PWM whenever the PMIC is enabled.
+ * Set the PMIC temperature alarm module to be always on. This ensures
+ * that die temperature monitoring is active even if CXO is disabled
+ * (i.e. when sleep_b is low). This is necessary since CXO can be
+ * disabled while the system is still heavily loaded. Also, using
+ * the always-on instead of PWM-enabled configuration ensures that the
+ * die temperature can be measured by the PMIC ADC without reconfiguring
+ * the temperature alarm module first.
*/
- reg = (1 << TEMP_ALARM_PWM_EN_SHIFT)
- | (3 << TEMP_ALARM_PWM_PER_PRE_SHIFT)
- | (3 << TEMP_ALARM_PWM_PER_DIV_SHIFT);
-
- rc = pm8xxx_tm_write_pwm(chip, reg);
+ rc = pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_ALWAYS);
return rc;
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 84cd3e7..7a0e32b 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -391,8 +391,6 @@
struct msm_hs_port *msm_uport;
struct device *dev;
- struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
-
if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
@@ -402,10 +400,6 @@
msm_uport = &q_uart_port[pdev->id];
dev = msm_uport->uport.dev;
- if (pdata && pdata->gpio_config)
- if (pdata->gpio_config(0))
- dev_err(dev, "GPIO config error\n");
-
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
debugfs_remove(msm_uport->loopback_dir);
@@ -1646,6 +1640,9 @@
unsigned long flags;
unsigned int data;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ const struct msm_serial_hs_platform_data *pdata =
+ pdev->dev.platform_data;
struct circ_buf *tx_buf = &uport->state->xmit;
struct msm_hs_tx *tx = &msm_uport->tx;
@@ -1665,6 +1662,10 @@
return ret;
}
+ if (pdata && pdata->gpio_config)
+ if (unlikely(pdata->gpio_config(1)))
+ dev_err(uport->dev, "Cannot configure gpios\n");
+
/* Set auto RFR Level */
data = msm_hs_read(uport, UARTDM_MR1_ADDR);
data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
@@ -1945,10 +1946,6 @@
if (unlikely(msm_uport->wakeup.irq < 0))
return -ENXIO;
- if (pdata->gpio_config)
- if (unlikely(pdata->gpio_config(1)))
- dev_err(uport->dev, "Cannot configure"
- "gpios\n");
}
resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -2086,6 +2083,9 @@
unsigned int data;
unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct platform_device *pdev = to_platform_device(uport->dev);
+ const struct msm_serial_hs_platform_data *pdata =
+ pdev->dev.platform_data;
if (msm_uport->tx.dma_in_flight) {
spin_lock_irqsave(&uport->lock, flags);
@@ -2148,6 +2148,10 @@
free_irq(uport->irq, msm_uport);
if (use_low_power_wakeup(msm_uport))
free_irq(msm_uport->wakeup.irq, msm_uport);
+
+ if (pdata && pdata->gpio_config)
+ if (pdata->gpio_config(0))
+ dev_err(uport->dev, "GPIO config error\n");
}
static void __exit msm_serial_hs_exit(void)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 2f3f83d..cc9ffaa 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -1369,9 +1369,12 @@
else
line = pdev->id;
- /* Use line number from device tree if present */
- if (pdev->dev.of_node)
- of_property_read_u32(pdev->dev.of_node, "cell-index", &line);
+ /* Use line number from device tree alias if present */
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ line = ret;
+ }
if (unlikely(line < 0 || line >= UART_NR))
return -ENXIO;
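With of_alias_get_id(), the line number now follows the standard serial
aliases. A hypothetical .dtsi fragment (labels invented for illustration):

    aliases {
        serial0 = &uart1;
        serial1 = &uart5;
    };

This lets boards renumber ports explicitly instead of relying on cell-index
ordering.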
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4073fc8..4865cdd 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -36,6 +36,7 @@
#include <linux/power_supply.h>
#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
#include <mach/msm_xo.h>
#include <mach/msm_bus.h>
@@ -141,8 +142,6 @@
struct regulator *hsusb_vddcx;
struct regulator *ssusb_1p8;
struct regulator *ssusb_vddcx;
- enum usb_vdd_type ss_vdd_type;
- enum usb_vdd_type hs_vdd_type;
struct dwc3_ext_xceiv ext_xceiv;
bool resume_pending;
atomic_t pm_suspended;
@@ -162,6 +161,9 @@
unsigned int online;
unsigned int host_mode;
unsigned int current_max;
+ unsigned int vdd_no_vol_level;
+ unsigned int vdd_low_vol_level;
+ unsigned int vdd_high_vol_level;
bool vbus_active;
};
@@ -177,23 +179,6 @@
#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
-#define USB_PHY_VDD_DIG_VOL_NONE 0 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-
-static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
- { /* VDD_CX CORNER Voting */
- [VDD_NONE] = RPM_VREG_CORNER_NONE,
- [VDD_MIN] = RPM_VREG_CORNER_NOMINAL,
- [VDD_MAX] = RPM_VREG_CORNER_HIGH,
- },
- { /* VDD_CX Voltage Voting */
- [VDD_NONE] = USB_PHY_VDD_DIG_VOL_NONE,
- [VDD_MIN] = USB_PHY_VDD_DIG_VOL_MIN,
- [VDD_MAX] = USB_PHY_VDD_DIG_VOL_MAX,
- },
-};
-
static struct dwc3_msm *context;
static u64 dwc3_msm_dma_mask = DMA_BIT_MASK(64);
@@ -858,12 +843,11 @@
/* HSPHY */
static int dwc3_hsusb_config_vddcx(int high)
{
- int min_vol, ret;
+ int min_vol, max_vol, ret;
struct dwc3_msm *dwc = context;
- enum usb_vdd_type vdd_type = context->hs_vdd_type;
- int max_vol = vdd_val[vdd_type][VDD_MAX];
- min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+ max_vol = dwc->vdd_high_vol_level;
+ min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
ret = regulator_set_voltage(dwc->hsusb_vddcx, min_vol, max_vol);
if (ret) {
dev_err(dwc->dev, "unable to set voltage for HSUSB_VDDCX\n");
@@ -983,12 +967,11 @@
/* SSPHY */
static int dwc3_ssusb_config_vddcx(int high)
{
- int min_vol, ret;
+ int min_vol, max_vol, ret;
struct dwc3_msm *dwc = context;
- enum usb_vdd_type vdd_type = context->ss_vdd_type;
- int max_vol = vdd_val[vdd_type][VDD_MAX];
- min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+ max_vol = dwc->vdd_high_vol_level;
+ min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
ret = regulator_set_voltage(dwc->ssusb_vddcx, min_vol, max_vol);
if (ret) {
dev_err(dwc->dev, "unable to set voltage for SSUSB_VDDCX\n");
@@ -1615,6 +1598,8 @@
struct resource *res;
void __iomem *tcsr;
int ret = 0;
+ int len = 0;
+ u32 tmp[3];
msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
if (!msm) {
@@ -1689,19 +1674,26 @@
}
clk_prepare_enable(msm->ref_clk);
+
+ of_get_property(node, "qcom,vdd-voltage-level", &len);
+ if (len == sizeof(tmp)) {
+ of_property_read_u32_array(node, "qcom,vdd-voltage-level",
+ tmp, len/sizeof(*tmp));
+ msm->vdd_no_vol_level = tmp[0];
+ msm->vdd_low_vol_level = tmp[1];
+ msm->vdd_high_vol_level = tmp[2];
+ } else {
+ dev_err(&pdev->dev, "no qcom,vdd-voltage-level property\n");
+ ret = -EINVAL;
+ goto disable_ref_clk;
+ }
+
/* SS PHY */
- msm->ss_vdd_type = VDDCX_CORNER;
msm->ssusb_vddcx = devm_regulator_get(&pdev->dev, "ssusb_vdd_dig");
if (IS_ERR(msm->ssusb_vddcx)) {
- msm->ssusb_vddcx = devm_regulator_get(&pdev->dev,
- "SSUSB_VDDCX");
- if (IS_ERR(msm->ssusb_vddcx)) {
- dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
- ret = PTR_ERR(msm->ssusb_vddcx);
- goto disable_ref_clk;
- }
- msm->ss_vdd_type = VDDCX;
- dev_dbg(&pdev->dev, "ss_vdd_type: VDDCX\n");
+ dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
+ ret = PTR_ERR(msm->ssusb_vddcx);
+ goto disable_ref_clk;
}
ret = dwc3_ssusb_config_vddcx(1);
@@ -1729,18 +1721,11 @@
}
/* HS PHY */
- msm->hs_vdd_type = VDDCX_CORNER;
msm->hsusb_vddcx = devm_regulator_get(&pdev->dev, "hsusb_vdd_dig");
if (IS_ERR(msm->hsusb_vddcx)) {
- msm->hsusb_vddcx = devm_regulator_get(&pdev->dev,
- "HSUSB_VDDCX");
- if (IS_ERR(msm->hsusb_vddcx)) {
- dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
- ret = PTR_ERR(msm->ssusb_vddcx);
- goto disable_ss_ldo;
- }
- msm->hs_vdd_type = VDDCX;
- dev_dbg(&pdev->dev, "hs_vdd_type: VDDCX\n");
+ dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
+ ret = PTR_ERR(msm->hsusb_vddcx);
+ goto disable_ss_ldo;
}
ret = dwc3_hsusb_config_vddcx(1);
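A binding entry matching the three-element parse above, using the voltage
levels from the defines this patch removes (surrounding node context assumed):

    qcom,vdd-voltage-level = <0 1045000 1320000>;

The property must contain exactly three u32 cells (none/low/high levels);
anything else fails the probe with -EINVAL.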
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 4357e0d..2f94675 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -149,7 +149,7 @@
/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
- .bLength = sizeof rmnet_ss_notify_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
@@ -168,7 +168,7 @@
};
static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
- .bLength = sizeof rmnet_ss_in_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
@@ -185,7 +185,7 @@
};
static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
- .bLength = sizeof rmnet_ss_out_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 43347b3..74dba07 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -246,7 +246,7 @@
#ifdef CONFIG_MODEM_SUPPORT
static struct usb_endpoint_descriptor gser_ss_notify_desc = {
- .bLength = sizeof gser_ss_notify_desc,
+ .bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index 4931c1e..bba2ca6 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -332,6 +332,7 @@
net_dev = dev_get_by_name(&init_net, netname);
if (net_dev) {
+ dev_put(net_dev);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
@@ -355,6 +356,10 @@
/* Extract the eth_qc_dev from the net device */
net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ dev_put(net_dev);
dev = netdev_priv(net_dev);
if (!dev)
@@ -400,6 +405,10 @@
/* Extract the eth_qc_dev from the net device */
net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return;
+
+ dev_put(net_dev);
dev = netdev_priv(net_dev);
if (!dev)
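The pattern being fixed: dev_get_by_name() takes a reference on the returned
net_device, so each successful lookup must be balanced by dev_put(). A sketch
of the corrected usage (interface name assumed):

    struct net_device *nd = dev_get_by_name(&init_net, "rmnet0");

    if (!nd)
        return ERR_PTR(-EINVAL);
    dev_put(nd);              /* drop the lookup reference immediately; */
    dev = netdev_priv(nd);    /* safe only because this driver registered
                               * the netdev and controls its lifetime */

Dropping the reference right away avoids leaking it on every configuration
change while keeping the existing usage of the pointer unchanged.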
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index fff9465..1a75bd7 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -337,7 +337,7 @@
/* caller has locked the root hub, and should reset/reinit on error */
-static int ehci_bus_resume (struct usb_hcd *hcd)
+static int __maybe_unused ehci_bus_resume(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 2d69a98..8c22f8e 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -76,6 +76,7 @@
struct clk *phy_clk;
struct clk *cal_clk;
struct regulator *hsic_vddcx;
+ struct regulator *hsic_gdsc;
bool async_int;
atomic_t in_lpm;
struct wake_lock wlock;
@@ -103,6 +104,8 @@
struct msm_hsic_hcd *__mehci;
static bool debug_bus_voting_enabled = true;
+static u64 ehci_msm_hsic_dma_mask = DMA_BIT_MASK(32);
+
static unsigned int enable_payload_log = 1;
module_param(enable_payload_log, uint, S_IRUGO | S_IWUSR);
@@ -393,6 +396,35 @@
}
+/* Global Distributed Switch Controller (GDSC) init */
+static int msm_hsic_init_gdsc(struct msm_hsic_hcd *mehci, int init)
+{
+ int ret = 0;
+
+ if (IS_ERR(mehci->hsic_gdsc))
+ return 0;
+
+ if (!mehci->hsic_gdsc) {
+ mehci->hsic_gdsc = devm_regulator_get(mehci->dev,
+ "HSIC_GDSC");
+ if (IS_ERR(mehci->hsic_gdsc))
+ return 0;
+ }
+
+ if (init) {
+ ret = regulator_enable(mehci->hsic_gdsc);
+ if (ret) {
+ dev_err(mehci->dev, "unable to enable hsic gdsc\n");
+ return ret;
+ }
+ } else {
+ regulator_disable(mehci->hsic_gdsc);
+ }
+
+ return 0;
+}
+
static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -563,18 +595,22 @@
#define HSIC_PAD_CALIBRATION 0xA8
#define HSIC_GPIO_PAD_VAL 0x0A0AAA10
#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
-static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+
+static void msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
- int ret;
- struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
msm_hsic_clk_reset(mehci);
/* select ulpi phy */
writel_relaxed(0x80000000, USB_PORTSC);
-
mb();
+}
+
+static int msm_hsic_start(struct msm_hsic_hcd *mehci)
+{
+ struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+ int ret;
/* HSIC init sequence when HSIC signals (Strobe/Data) are
routed via GPIOs */
@@ -635,6 +671,15 @@
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
#ifdef CONFIG_PM_SLEEP
+static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+{
+ /* reset HSIC phy */
+ msm_hsic_phy_reset(mehci);
+
+ /* HSIC init procedure (calibration) */
+ return msm_hsic_start(mehci);
+}
+
static int msm_hsic_suspend(struct msm_hsic_hcd *mehci)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -1534,6 +1579,11 @@
dev_dbg(&pdev->dev, "ehci_msm-hsic probe\n");
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &ehci_msm_hsic_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
/* After parent device's probe is executed, it will be put in suspend
* mode. When child device's probe is called, driver core is not
* resuming parent device due to which parent will be in suspend even
@@ -1588,6 +1638,13 @@
if (pdata)
mehci->ehci.log2_irq_thresh = pdata->log2_irq_thresh;
+ ret = msm_hsic_init_gdsc(mehci, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize GDSC\n");
+ ret = -ENODEV;
+ goto put_hcd;
+ }
+
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ,
"peripheral_status_irq");
@@ -1616,11 +1673,8 @@
init_completion(&mehci->rt_completion);
init_completion(&mehci->gpt0_completion);
- ret = msm_hsic_reset(mehci);
- if (ret) {
- dev_err(&pdev->dev, "unable to initialize PHY\n");
- goto deinit_vddcx;
- }
+
+ msm_hsic_phy_reset(mehci);
ehci_wq = create_singlethread_workqueue("ehci_wq");
if (!ehci_wq) {
@@ -1634,7 +1688,13 @@
ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (ret) {
dev_err(&pdev->dev, "unable to register HCD\n");
- goto unconfig_gpio;
+ goto destroy_wq;
+ }
+
+ ret = msm_hsic_start(mehci);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize PHY\n");
+ goto destroy_wq;
}
device_init_wakeup(&pdev->dev, 1);
@@ -1710,11 +1770,11 @@
return 0;
-unconfig_gpio:
+destroy_wq:
destroy_workqueue(ehci_wq);
- msm_hsic_config_gpios(mehci, 0);
deinit_vddcx:
msm_hsic_init_vddcx(mehci, 0);
+ msm_hsic_init_gdsc(mehci, 0);
deinit_clocks:
msm_hsic_init_clocks(mehci, 0);
unmap:
@@ -1763,6 +1823,7 @@
usb_remove_hcd(hcd);
msm_hsic_config_gpios(mehci, 0);
msm_hsic_init_vddcx(mehci, 0);
+ msm_hsic_init_gdsc(mehci, 0);
msm_hsic_init_clocks(mehci, 0);
wake_lock_destroy(&mehci->wlock);
@@ -1881,7 +1942,11 @@
msm_hsic_runtime_idle)
};
#endif
-
+static const struct of_device_id hsic_host_dt_match[] = {
+	{ .compatible = "qcom,hsic-host", },
+	{},
+};
static struct platform_driver ehci_msm_hsic_driver = {
.probe = ehci_hsic_msm_probe,
.remove = __devexit_p(ehci_hsic_msm_remove),
@@ -1890,5 +1955,6 @@
#ifdef CONFIG_PM
.pm = &msm_hsic_dev_pm_ops,
#endif
+ .of_match_table = hsic_host_dt_match,
},
};
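The new msm_hsic_init_gdsc() helper above caches the devm_regulator_get() result and treats a stored ERR_PTR as "no GDSC on this target", so later calls become no-ops instead of re-querying the regulator framework. A minimal sketch of that get-once pattern, assuming a hypothetical supply name:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/*
 * The cached handle doubles as state: NULL means not requested yet,
 * an ERR_PTR means the supply is absent (treated as success), and a
 * valid pointer is usable.  "EXAMPLE_GDSC" is a hypothetical name.
 */
static int example_gdsc_ctrl(struct device *dev, struct regulator **gdsc,
			     bool on)
{
	if (IS_ERR(*gdsc))
		return 0;

	if (!*gdsc) {
		*gdsc = devm_regulator_get(dev, "EXAMPLE_GDSC");
		if (IS_ERR(*gdsc))
			return 0;
	}

	return on ? regulator_enable(*gdsc) : regulator_disable(*gdsc);
}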
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 2f4fac1..a707705 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -589,7 +589,7 @@
int mdp4_overlay_play_wait(struct fb_info *info,
struct msmfb_overlay_data *req);
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
-int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_overlay_commit(struct fb_info *info);
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
void mdp4_overlay_dma_commit(int mixer);
void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
@@ -988,9 +988,15 @@
{
/* empty */
}
+static inline int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+ int cndx, int wait)
+{
+ return 0;
+}
#else
void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_wfd_init(int cndx);
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd, int cndx, int wait);
#endif
#endif /* MDP_H */
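The header pairs the real mdp4_wfd_pipe_commit() prototype with a static inline stub so callers need no #ifdef of their own when writeback support is compiled out. The general shape of the idiom, with a hypothetical config symbol:

#ifdef CONFIG_EXAMPLE_FEATURE	/* hypothetical Kconfig symbol */
int example_pipe_commit(int cndx, int wait);
#else
static inline int example_pipe_commit(int cndx, int wait)
{
	return 0;	/* feature compiled out: succeed, do nothing */
}
#endif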
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 11952f3..ae3ffe6 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1973,6 +1973,7 @@
}
/* alpha channel is lost on VG pipe when using QSEED or M/N */
if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
+ s_pipe->alpha_enable &&
((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
!(s_pipe->op_mode & (MDP4_OP_SCALEX_PIXEL_RPT |
@@ -2085,7 +2086,9 @@
outpdw(overlay_base + off + 0x108, blend->fg_alpha);
outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
- if (mdp_rev >= MDP_REV_42)
+ if (mdp_rev >= MDP_REV_42 ||
+ ctrl->panel_mode & MDP4_PANEL_MDDI ||
+ ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
outpdw(overlay_base + off + 0x104, blend->op);
outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
@@ -3496,8 +3499,9 @@
return ret;
}
-int mdp4_overlay_commit(struct fb_info *info, int mixer)
+int mdp4_overlay_commit(struct fb_info *info)
{
+ int ret = 0;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (mfd == NULL)
@@ -3506,34 +3510,37 @@
if (!mfd->panel_power_on) /* suspended */
return -EINVAL;
- if (mixer >= MDP4_MIXER_MAX)
- return -EPERM;
-
mutex_lock(&mfd->dma->ov_mutex);
mdp4_overlay_mdp_perf_upd(mfd, 1);
- if (mixer == MDP4_MIXER0) {
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- /* cndx = 0 */
- mdp4_dsi_cmd_pipe_commit(0, 1);
- } else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- /* cndx = 0 */
- mdp4_dsi_video_pipe_commit(0, 1);
- } else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- /* cndx = 0 */
- mdp4_lcdc_pipe_commit(0, 1);
- }
- } else if (mixer == MDP4_MIXER1) {
- if (ctrl->panel_mode & MDP4_PANEL_DTV)
- mdp4_dtv_pipe_commit(0, 1);
+ switch (mfd->panel.type) {
+ case MIPI_CMD_PANEL:
+ mdp4_dsi_cmd_pipe_commit(0, 1);
+ break;
+ case MIPI_VIDEO_PANEL:
+ mdp4_dsi_video_pipe_commit(0, 1);
+ break;
+ case LCDC_PANEL:
+ mdp4_lcdc_pipe_commit(0, 1);
+ break;
+ case DTV_PANEL:
+ mdp4_dtv_pipe_commit(0, 1);
+ break;
+ case WRITEBACK_PANEL:
+ mdp4_wfd_pipe_commit(mfd, 0, 1);
+ break;
+ default:
+ pr_err("Panel Not Supported for Commit");
+ ret = -EINVAL;
+ break;
}
mdp4_overlay_mdp_perf_upd(mfd, 0);
mutex_unlock(&mfd->dma->ov_mutex);
- return 0;
+ return ret;
}
struct msm_iommu_ctx {
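With the mixer argument gone, the commit path derives the target interface from the framebuffer's panel type, which is why the msm_fb.c ioctl handler later in this diff no longer copies an index from userspace. The dispatch, consolidated from the hunks above (a condensed restatement, not new logic):

static int example_commit(struct msm_fb_data_type *mfd)
{
	int ret = 0;

	switch (mfd->panel.type) {
	case MIPI_CMD_PANEL:
		mdp4_dsi_cmd_pipe_commit(0, 1);		/* cndx=0, wait */
		break;
	case MIPI_VIDEO_PANEL:
		mdp4_dsi_video_pipe_commit(0, 1);
		break;
	case LCDC_PANEL:
		mdp4_lcdc_pipe_commit(0, 1);
		break;
	case DTV_PANEL:
		mdp4_dtv_pipe_commit(0, 1);
		break;
	case WRITEBACK_PANEL:
		mdp4_wfd_pipe_commit(mfd, 0, 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}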
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 6c2b1f6..aa50d94 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -88,6 +88,10 @@
}
static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list *node);
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list **wfdnode);
int mdp4_overlay_writeback_on(struct platform_device *pdev)
{
@@ -317,7 +321,8 @@
static void mdp4_wfd_wait4ov(int cndx);
-int mdp4_wfd_pipe_commit(void)
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+ int cndx, int wait)
{
int i, undx;
int mixer = 0;
@@ -327,8 +332,9 @@
struct mdp4_overlay_pipe *real_pipe;
unsigned long flags;
int cnt = 0;
+ struct msmfb_writeback_data_list *node = NULL;
- vctrl = &vsync_ctrl_db[0];
+ vctrl = &vsync_ctrl_db[cndx];
mutex_lock(&vctrl->update_lock);
undx = vctrl->update_ndx;
@@ -346,6 +352,8 @@
vp->update_cnt = 0; /* reset */
mutex_unlock(&vctrl->update_lock);
+ mdp4_wfd_dequeue_update(mfd, &node);
+
/* free previous committed iommu back to pool */
mdp4_overlay_iommu_unmap_freelist(mixer);
@@ -383,6 +391,11 @@
mdp4_stat.overlay_commit[pipe->mixer_num]++;
+ if (wait)
+ mdp4_wfd_wait4ov(cndx);
+
+ mdp4_wfd_queue_wakeup(mfd, node);
+
return cnt;
}
@@ -444,7 +457,6 @@
void mdp4_writeback_overlay(struct msm_fb_data_type *mfd)
{
- struct msmfb_writeback_data_list *node = NULL;
struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
@@ -456,36 +468,7 @@
vctrl = &vsync_ctrl_db[0];
pipe = vctrl->base_pipe;
- mutex_lock(&mfd->unregister_mutex);
- mutex_lock(&mfd->writeback_mutex);
- if (!list_empty(&mfd->writeback_free_queue)
- && mfd->writeback_state != WB_STOPING
- && mfd->writeback_state != WB_STOP) {
- node = list_first_entry(&mfd->writeback_free_queue,
- struct msmfb_writeback_data_list, active_entry);
- }
- if (node) {
- list_del(&(node->active_entry));
- node->state = IN_BUSY_QUEUE;
- mfd->writeback_active_cnt++;
- }
- mutex_unlock(&mfd->writeback_mutex);
-
- pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
-
- if (!pipe->ov_blt_addr) {
- pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
- (unsigned int)pipe->ov_blt_addr, node);
- mutex_unlock(&mfd->unregister_mutex);
- return;
- }
-
mutex_lock(&mfd->dma->ov_mutex);
- if (pipe && !pipe->ov_blt_addr) {
- pr_err("%s: no writeback buffer 0x%x\n", __func__,
- (unsigned int)pipe->ov_blt_addr);
- goto fail_no_blt_addr;
- }
if (pipe->pipe_type == OVERLAY_TYPE_RGB)
mdp4_wfd_pipe_queue(0, pipe);
@@ -493,26 +476,15 @@
mdp4_overlay_mdp_perf_upd(mfd, 1);
mdp_clk_ctrl(1);
- mdp4_overlay_writeback_update(mfd);
- mdp4_wfd_pipe_commit();
+ mdp4_wfd_pipe_commit(mfd, 0, 1);
mdp4_overlay_mdp_perf_upd(mfd, 0);
- mdp4_wfd_wait4ov(0);
mdp_clk_ctrl(0);
- mutex_lock(&mfd->writeback_mutex);
- list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
- mfd->writeback_active_cnt--;
- mutex_unlock(&mfd->writeback_mutex);
- wake_up(&mfd->wait_q);
-fail_no_blt_addr:
- /*NOTE: This api was removed
- mdp4_overlay_resource_release();*/
mutex_unlock(&mfd->dma->ov_mutex);
- mutex_unlock(&mfd->unregister_mutex);
- pr_debug("%s:-\n", __func__);
}
static int mdp4_overlay_writeback_register_buffer(
@@ -763,3 +735,68 @@
mutex_unlock(&mfd->unregister_mutex);
return rc;
}
+
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list **wfdnode)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ struct msmfb_writeback_data_list *node = NULL;
+
+ if (mfd && !mfd->panel_power_on)
+ return;
+
+ pr_debug("%s:+ mfd=%x\n", __func__, (int)mfd);
+
+ vctrl = &vsync_ctrl_db[0];
+ pipe = vctrl->base_pipe;
+
+ mutex_lock(&mfd->unregister_mutex);
+ mutex_lock(&mfd->writeback_mutex);
+ if (!list_empty(&mfd->writeback_free_queue)
+ && mfd->writeback_state != WB_STOPING
+ && mfd->writeback_state != WB_STOP) {
+ node = list_first_entry(&mfd->writeback_free_queue,
+ struct msmfb_writeback_data_list, active_entry);
+ }
+ if (node) {
+ list_del(&(node->active_entry));
+ node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
+ }
+ mutex_unlock(&mfd->writeback_mutex);
+
+ pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+
+ if (!pipe->ov_blt_addr) {
+ pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
+ (unsigned int)pipe->ov_blt_addr, node);
+ mutex_unlock(&mfd->unregister_mutex);
+ return;
+ }
+
+ mdp4_overlay_writeback_update(mfd);
+
+ *wfdnode = node;
+
+ mutex_unlock(&mfd->unregister_mutex);
+}
+
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+ struct msmfb_writeback_data_list *node)
+{
+ if (mfd && !mfd->panel_power_on)
+ return;
+
+ if (node == NULL)
+ return;
+
+ pr_debug("%s: mfd=%x node: %p", __func__, (int)mfd, node);
+
+ mutex_lock(&mfd->writeback_mutex);
+ list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
+ mfd->writeback_active_cnt--;
+ mutex_unlock(&mfd->writeback_mutex);
+ wake_up(&mfd->wait_q);
+}
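The writeback rework above splits what used to be one monolithic function: mdp4_wfd_dequeue_update() pulls a free buffer and programs the writeback stage before the commit, while mdp4_wfd_queue_wakeup() moves the node to the busy queue and wakes waiters after the overlay completes. A self-contained sketch of that handoff, with hypothetical types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct example_node {			/* hypothetical buffer node */
	struct list_head entry;
};

struct example_dev {			/* hypothetical device state */
	struct mutex lock;
	struct list_head free_q, busy_q;
	int active;
	wait_queue_head_t waitq;
};

static struct example_node *example_dequeue(struct example_dev *d)
{
	struct example_node *node = NULL;

	mutex_lock(&d->lock);
	if (!list_empty(&d->free_q))
		node = list_first_entry(&d->free_q, struct example_node,
					entry);
	if (node) {
		list_del(&node->entry);
		d->active++;
	}
	mutex_unlock(&d->lock);

	return node;	/* NULL means no buffer: skip the commit */
}

static void example_complete(struct example_dev *d, struct example_node *node)
{
	if (!node)
		return;

	mutex_lock(&d->lock);
	list_add_tail(&node->entry, &d->busy_q);
	d->active--;
	mutex_unlock(&d->lock);
	wake_up(&d->waitq);
}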
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 3c60c2b..d041125 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -21,6 +21,8 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <mach/iommu_domains.h>
+
#define MDSS_REG_WRITE(addr, val) writel_relaxed(val, mdss_res->mdp_base + addr)
#define MDSS_REG_READ(addr) readl_relaxed(mdss_res->mdp_base + addr)
@@ -34,6 +36,21 @@
MDSS_MAX_CLK
};
+enum mdss_iommu_domain_type {
+ MDSS_IOMMU_DOMAIN_SECURE,
+ MDSS_IOMMU_DOMAIN_UNSECURE,
+ MDSS_IOMMU_MAX_DOMAIN
+};
+
+struct mdss_iommu_map_type {
+ char *client_name;
+ char *ctx_name;
+ struct device *ctx;
+ struct msm_iova_partition partitions[1];
+ int npartitions;
+ int domain_idx;
+};
+
struct mdss_data_type {
u32 rev;
u32 mdp_rev;
@@ -72,8 +89,8 @@
u32 *mixer_type_map;
struct ion_client *iclient;
- int iommu_domain;
int iommu_attached;
+ struct mdss_iommu_map_type *iommu_map;
struct early_suspend early_suspend;
};
@@ -112,14 +129,14 @@
return mdss_res->iommu_attached;
}
-static inline int mdss_get_iommu_domain(void)
+static inline int mdss_get_iommu_domain(u32 type)
{
+ if (type >= MDSS_IOMMU_MAX_DOMAIN)
+ return -EINVAL;
+
if (!mdss_res)
return -ENODEV;
- return mdss_res->iommu_domain;
+ return mdss_res->iommu_map[type].domain_idx;
}
-
-int mdss_iommu_attach(void);
-int mdss_iommu_dettach(void);
#endif /* MDSS_H */
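mdss_get_iommu_domain() now takes a domain type and returns an index consumable by the msm iommu helpers, so callers select the secure or unsecure domain explicitly. A hedged usage sketch built only from calls that appear elsewhere in this series (SZ_4K is the kernel's standard size macro):

static int example_map(unsigned long phys, unsigned long size,
		       unsigned long *iova)
{
	int dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);

	if (dom < 0)
		return dom;	/* -EINVAL or -ENODEV from the getter */

	return msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
					   iova);
}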
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8f4f4d5..980ed46 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -327,6 +327,26 @@
return ret;
}
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_dsi_on(pdata);
+ break;
+ case MDSS_EVENT_BLANK:
+ rc = mdss_dsi_ctrl_unprepare(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_dsi_off(pdata);
+ break;
+ }
+ return rc;
+}
+
static int mdss_dsi_resource_initialized;
static int __devinit mdss_dsi_probe(struct platform_device *pdev)
@@ -476,9 +496,7 @@
if (!ctrl_pdata)
return -ENOMEM;
- (ctrl_pdata->panel_data).on = mdss_dsi_on;
- (ctrl_pdata->panel_data).off = mdss_dsi_off;
- (ctrl_pdata->panel_data).intf_unprepare = mdss_dsi_ctrl_unprepare;
+ ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
memcpy(&((ctrl_pdata->panel_data).panel_info),
&(panel_data->panel_info),
sizeof(struct mdss_panel_info));
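This is the core of the series: the per-panel on/off/intf_unprepare callbacks collapse into a single event_handler, and the display controller drives panels through event codes (enumerated in mdss_panel.h later in this diff). A minimal handler for a hypothetical panel driver:

static int example_panel_event_handler(struct mdss_panel_data *pdata,
				       int event, void *arg)
{
	int rc = 0;

	switch (event) {
	case MDSS_EVENT_UNBLANK:
		rc = example_panel_on(pdata);	/* hypothetical helper */
		break;
	case MDSS_EVENT_TIMEGEN_OFF:
		rc = example_panel_off(pdata);	/* hypothetical helper */
		break;
	default:
		pr_debug("%s: unhandled event=%d\n", __func__, event);
		break;
	}

	return rc;
}

	/* registration, in place of the old .on/.off assignments: */
	pdata->event_handler = example_panel_event_handler;

Unhandled events intentionally return 0, matching the dsi/edp/hdmi handlers in this series, so new event codes degrade to no-ops on panels that do not care about them.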
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 125644e..8c3b1a8 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -1199,6 +1199,7 @@
{
int len;
int i;
+ int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
char *bp;
unsigned long size, addr;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -1229,7 +1230,7 @@
if (is_mdss_iommu_attached()) {
int ret = msm_iommu_map_contig_buffer(tp->dmap,
- mdss_get_iommu_domain(), 0,
+ mdss_get_iommu_domain(domain), 0,
size, SZ_4K, 0, &(addr));
if (IS_ERR_VALUE(ret)) {
pr_err("unable to map dma memory to iommu(%d)\n", ret);
@@ -1251,8 +1252,8 @@
wait_for_completion(&dsi_dma_comp);
if (is_mdss_iommu_attached())
- msm_iommu_unmap_contig_buffer(addr, mdss_get_iommu_domain(),
- 0, size);
+ msm_iommu_unmap_contig_buffer(addr,
+ mdss_get_iommu_domain(domain), 0, size);
dma_unmap_single(&dsi_dev, tp->dmap, size, DMA_TO_DEVICE);
tp->dmap = 0;
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index 1d7a6fe..1cf3101 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -353,6 +353,23 @@
return ret;
}
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_edp_on(pdata);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = mdss_edp_off(pdata);
+ break;
+ }
+ return rc;
+}
+
/*
* Converts from EDID struct to mdss_panel_info
*/
@@ -413,8 +430,7 @@
edp_drv->panel_data.panel_info.bl_min = 1;
edp_drv->panel_data.panel_info.bl_max = 255;
- edp_drv->panel_data.on = mdss_edp_on;
- edp_drv->panel_data.off = mdss_edp_off;
+ edp_drv->panel_data.event_handler = mdss_edp_event_handler;
edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
ret = mdss_register_panel(&edp_drv->panel_data);
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index b711fd9..4ec4046 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -325,6 +325,24 @@
return 0;
}
+static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+{
+ struct mdss_panel_data *pdata;
+
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending event=%d for fb%d\n", e, mfd->index);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, e, NULL);
+
+ return 0;
+}
+
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
@@ -334,6 +352,12 @@
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+ if (ret) {
+ pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_on = mfd->panel_power_on;
@@ -359,6 +383,12 @@
pr_debug("mdss_fb resume index=%d\n", mfd->index);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+ if (ret) {
+ pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+ return ret;
+ }
+
/* resume state var recover */
mfd->op_enable = mfd->suspend.op_enable;
@@ -691,6 +721,7 @@
size *= mfd->fb_page;
if (mfd->index == 0) {
+ int dom;
virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
if (!virt) {
pr_err("unable to alloc fbmem size=%u\n", size);
@@ -698,9 +729,9 @@
}
phys = memory_pool_node_paddr(virt);
if (is_mdss_iommu_attached()) {
- msm_iommu_map_contig_buffer(phys,
- mdss_get_iommu_domain(), 0, size, SZ_4K, 0,
- &(mfd->iova));
+ dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+ msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
+ 0, &(mfd->iova));
}
pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
size, virt, phys, mfd->index);
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 78f2b9a..b760388 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -93,6 +93,7 @@
u32 bl_scale;
u32 bl_min_lvl;
struct mutex lock;
+ struct mutex ov_lock;
struct platform_device *pdev;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 0c36c63..ab462f5 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -2035,6 +2035,21 @@
return rc;
} /* hdmi_tx_dev_init */
+static int hdmi_tx_event_handler(struct mdss_panel_data *panel_data,
+ int event, void *arg)
+{
+ int rc = 0;
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = hdmi_tx_power_on(panel_data);
+ break;
+ case MDSS_EVENT_TIMEGEN_OFF:
+ rc = hdmi_tx_power_off(panel_data);
+ break;
+ }
+ return rc;
+}
+
static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc = 0;
@@ -2044,8 +2059,7 @@
return -EINVAL;
}
- hdmi_ctrl->panel_data.on = hdmi_tx_power_on;
- hdmi_ctrl->panel_data.off = hdmi_tx_power_off;
+ hdmi_ctrl->panel_data.event_handler = hdmi_tx_event_handler;
hdmi_ctrl->video_resolution = DEFAULT_VIDEO_RESOLUTION;
rc = hdmi_tx_init_panel_info(hdmi_ctrl->video_resolution,
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 11b0831..bcb3aee 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -95,28 +95,29 @@
.name = "mdss_mdp",
};
-struct msm_iova_partition mdp_iommu_partitions[] = {
- {
- .start = SZ_128K,
- .size = SZ_2G - SZ_128K,
+struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
+ [MDSS_IOMMU_DOMAIN_UNSECURE] = {
+ .client_name = "mdp_ns",
+ .ctx_name = "mdp_0",
+ .partitions = {
+ {
+ .start = SZ_128K,
+ .size = SZ_1G - SZ_128K,
+ },
+ },
+ .npartitions = 1,
},
-};
-struct msm_iova_layout mdp_iommu_layout = {
- .client_name = "mdss_mdp",
- .partitions = mdp_iommu_partitions,
- .npartitions = ARRAY_SIZE(mdp_iommu_partitions),
-};
-
-struct {
- char *name;
- struct device *ctx;
-} mdp_iommu_ctx[] = {
- {
- .name = "mdp_0",
+ [MDSS_IOMMU_DOMAIN_SECURE] = {
+ .client_name = "mdp_secure",
+ .ctx_name = "mdp_1",
+ .partitions = {
+ {
+ .start = SZ_1G,
+ .size = SZ_1G,
+ },
+ },
+ .npartitions = 1,
},
- {
- .name = "mdp_1",
- }
};
struct mdss_hw mdss_mdp_hw = {
@@ -670,85 +671,103 @@
return 0;
}
-int mdss_iommu_attach(void)
+int mdss_iommu_attach(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
- int i, domain_idx;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- if (mdss_res->iommu_attached) {
+ if (mdata->iommu_attached) {
pr_warn("mdp iommu already attached\n");
return 0;
}
- domain_idx = mdss_get_iommu_domain();
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ WARN(1, "could not attach iommu client %s to ctx %s\n",
+ iomap->client_name, iomap->ctx_name);
+ continue;
+ }
+ iommu_attach_device(domain, iomap->ctx);
}
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
- if (iommu_attach_device(domain, mdp_iommu_ctx[i].ctx)) {
- WARN(1, "could not attach iommu domain %d to ctx %s\n",
- domain_idx, mdp_iommu_ctx[i].name);
- return -EINVAL;
- }
- }
- mdss_res->iommu_attached = true;
+ mdata->iommu_attached = true;
return 0;
}
-int mdss_iommu_dettach(void)
+int mdss_iommu_dettach(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
- int i, domain_idx;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- if (!mdss_res->iommu_attached) {
+ if (!mdata->iommu_attached) {
pr_warn("mdp iommu already dettached\n");
return 0;
}
- domain_idx = mdss_get_iommu_domain();
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ continue;
+ }
+ iommu_detach_device(domain, iomap->ctx);
}
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++)
- iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);
- mdss_res->iommu_attached = false;
+ mdata->iommu_attached = false;
return 0;
}
-int mdss_iommu_init(void)
+int mdss_iommu_init(struct mdss_data_type *mdata)
{
+ struct msm_iova_layout layout;
struct iommu_domain *domain;
- int domain_idx, i;
+ struct mdss_iommu_map_type *iomap;
+ int i;
- domain_idx = msm_register_domain(&mdp_iommu_layout);
- if (IS_ERR_VALUE(domain_idx))
- return -EINVAL;
-
- domain = msm_get_iommu_domain(domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n", domain_idx);
- return -EINVAL;
+ if (mdata->iommu_map) {
+ pr_warn("iommu already initialized\n");
+ return 0;
}
- iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ iomap = &mdss_iommu_map[i];
- for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
- mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
- if (!mdp_iommu_ctx[i].ctx) {
+ layout.client_name = iomap->client_name;
+ layout.partitions = iomap->partitions;
+ layout.npartitions = iomap->npartitions;
+ layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
+
+ iomap->domain_idx = msm_register_domain(&layout);
+ if (IS_ERR_VALUE(iomap->domain_idx))
+ return -EINVAL;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ return -EINVAL;
+ }
+ iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+
+ iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
+ if (!iomap->ctx) {
pr_warn("unable to get iommu ctx(%s)\n",
- mdp_iommu_ctx[i].name);
+ iomap->ctx_name);
return -EINVAL;
}
}
- mdss_res->iommu_domain = domain_idx;
+
+ mdata->iommu_map = mdss_iommu_map;
return 0;
}
@@ -815,9 +834,9 @@
mdata->iclient = NULL;
}
- rc = mdss_iommu_init();
+ rc = mdss_iommu_init(mdata);
if (!IS_ERR_VALUE(rc))
- mdss_iommu_attach();
+ mdss_iommu_attach(mdata);
rc = mdss_hw_init(mdata);
@@ -934,11 +953,11 @@
if (on && !mdata->fs_ena) {
pr_debug("Enable MDP FS\n");
regulator_enable(mdata->fs);
- mdss_iommu_attach();
+ mdss_iommu_attach(mdata);
mdata->fs_ena = true;
} else if (!on && mdata->fs_ena) {
pr_debug("Disable MDP FS\n");
- mdss_iommu_dettach();
+ mdss_iommu_dettach(mdata);
regulator_disable(mdata->fs);
mdata->fs_ena = false;
}
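mdss_iommu_init() now builds an msm_iova_layout per domain from the static mdss_iommu_map table and records the returned index for later lookups. A sketch of registering a single unsecure domain with the same helpers (partition bounds illustrative):

static int example_register_domain(void)
{
	struct msm_iova_partition part = {
		.start = SZ_128K,
		.size = SZ_1G - SZ_128K,
	};
	struct msm_iova_layout layout = {
		.client_name = "example_ns",	/* hypothetical client */
		.partitions = &part,
		.npartitions = 1,
		.is_secure = 0,
	};
	struct iommu_domain *domain;
	int idx;

	idx = msm_register_domain(&layout);
	if (IS_ERR_VALUE(idx))
		return -EINVAL;

	domain = msm_get_iommu_domain(idx);
	if (!domain)
		return -EINVAL;
	iommu_set_fault_handler(domain, mdss_iommu_fault_handler);

	return idx;	/* cache this index for later map/attach calls */
}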
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 72871aa..2e92591 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -284,13 +284,13 @@
void mdss_mdp_clk_ctrl(int enable, int isr);
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd);
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg);
struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index f660375..00f5874 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,11 @@
bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
+
+ if ((bus_ib_quota == 0) && (clk_rate > 0)) {
+ /* allocate min bw for panel cmds if mdp is active */
+ bus_ib_quota = SZ_16M;
+ }
mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
}
if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
@@ -531,9 +536,28 @@
return 0;
}
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
struct mdss_panel_data *pdata;
+ if (!ctl || !ctl->mfd)
+ return -ENODEV;
+
+ pdata = dev_get_platdata(&ctl->mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
+
+ if (pdata->event_handler)
+ return pdata->event_handler(pdata, event, arg);
+
+ return 0;
+}
+
+int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
u32 outsize, temp, off;
@@ -545,12 +569,6 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
if (mdss_mdp_ctl_init(mfd)) {
pr_err("unable to initialize ctl\n");
return -ENODEV;
@@ -568,6 +586,12 @@
ctl->power_on = true;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
+ if (ret) {
+ pr_err("panel power on failed ctl=%d\n", ctl->num);
+ goto start_fail;
+ }
+
if (ctl->start_fnc)
ret = ctl->start_fnc(ctl);
else
@@ -579,17 +603,6 @@
goto start_fail;
}
- /* request bus bandwidth for panel commands */
- ctl->clk_rate = MDP_CLK_DEFAULT_RATE;
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
- ret = pdata->on(pdata);
- if (ret) {
- pr_err("panel power on failed ctl=%d\n", ctl->num);
- goto panel_fail;
- }
-
pr_debug("ctl_num=%d\n", ctl->num);
mixer = ctl->mixer_left;
@@ -617,23 +630,18 @@
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
}
-panel_fail:
- if (ret && ctl->stop_fnc)
- ctl->stop_fnc(ctl);
+
start_fail:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&ctl->lock);
- if (ret) {
+ if (ret)
mdss_mdp_ctl_destroy(mfd);
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
- }
return ret;
}
int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd)
{
- struct mdss_panel_data *pdata;
struct mdss_mdp_ctl *ctl;
int ret = 0;
@@ -648,12 +656,6 @@
return -ENODEV;
}
- pdata = dev_get_platdata(&mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
-
ctl = mfd->ctl;
if (!ctl->power_on) {
@@ -663,43 +665,33 @@
pr_debug("ctl_num=%d\n", mfd->ctl->num);
- mdss_mdp_overlay_release_all(mfd);
-
- /* request bus bandwidth for panel commands */
- ctl->bus_ib_quota = SZ_1M;
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_lock(&ctl->lock);
- ctl->power_on = false;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (pdata->intf_unprepare)
- ret = pdata->intf_unprepare(pdata);
-
- if (ret)
- pr_err("%s: intf_unprepare failed\n", __func__);
-
if (ctl->stop_fnc)
ret = ctl->stop_fnc(ctl);
else
pr_warn("no stop func for ctl=%d\n", ctl->num);
- if (ret)
+ if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
-
- ret = pdata->off(pdata);
+ } else {
+ ctl->power_on = false;
+ ctl->play_cnt = 0;
+ ctl->clk_rate = 0;
+ mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
+ }
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- ctl->play_cnt = 0;
-
- mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
mutex_unlock(&ctl->lock);
- if (!mfd->ref_cnt)
+ if (!ret && !mfd->ref_cnt) {
+ ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
+ WARN(ret, "unable to close intf %d\n", ctl->intf_num);
mdss_mdp_ctl_destroy(mfd);
+ }
return ret;
}
@@ -926,13 +918,16 @@
return -ENODEV;
}
- if (!ctl->power_on)
- return 0;
-
pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
- if (mutex_lock_interruptible(&ctl->lock))
- return -EINTR;
+ ret = mutex_lock_interruptible(&ctl->lock);
+ if (ret)
+ return ret;
+
+ if (!ctl->power_on) {
+ mutex_unlock(&ctl->lock);
+ return 0;
+ }
mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
@@ -992,7 +987,7 @@
mutex_lock(&mdss_mdp_ctl_lock);
for (i = 0; i < MDSS_MDP_MAX_CTL; i++) {
ctl = &mdss_mdp_ctl_list[i];
- if ((ctl->power_on) &&
+ if ((ctl->power_on) && (ctl->mfd) &&
(ctl->mfd->index == fb_num)) {
if (ctl->mixer_left) {
mixer_id[mixer_cnt] = ctl->mixer_left->num;
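Note the commit-path hunk above: mdss_mdp_display_commit() now checks power_on only after taking ctl->lock, closing the window in which mdss_mdp_ctl_off() could power the controller down between an unlocked check and the commit. The reordered prologue, condensed:

static int example_display_commit(struct mdss_mdp_ctl *ctl)
{
	int ret = mutex_lock_interruptible(&ctl->lock);

	if (ret)
		return ret;

	if (!ctl->power_on) {
		mutex_unlock(&ctl->lock);
		return 0;	/* powered down: nothing to flush */
	}

	/* ... program mixers and kick off the interface ... */

	mutex_unlock(&ctl->lock);
	return 0;
}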
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 1da30b8..b6ac126 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -166,6 +166,7 @@
#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
#define MDSS_MDP_REG_SSPP_FETCH_CONFIG 0x048
#define MDSS_MDP_REG_SSPP_VC1_RANGE 0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS 0x070
#define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR 0x0A4
#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 4d3fbf0..9508846 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -201,7 +201,7 @@
static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_video_ctx *ctx;
- int off;
+ int rc, off;
pr_debug("stop ctl=%d\n", ctl->num);
@@ -211,16 +211,27 @@
return -ENODEV;
}
- if (ctx->vsync_handler)
- mdss_mdp_video_set_vsync_handler(ctl, NULL);
-
if (ctx->timegen_en) {
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+ if (rc == -EBUSY) {
+ pr_debug("intf #%d busy don't turn off\n",
+ ctl->intf_num);
+ return rc;
+ }
+ WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ctx->timegen_en = false;
+
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_OFF, NULL);
+ WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
}
+ if (ctx->vsync_handler)
+ mdss_mdp_video_set_vsync_handler(ctl, NULL);
+
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
NULL, NULL);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
@@ -288,6 +299,7 @@
static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_video_ctx *ctx;
+ int rc;
pr_debug("kickoff ctl=%d\n", ctl->num);
@@ -306,15 +318,23 @@
if (!ctx->timegen_en) {
int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+ WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
- ctx->timegen_en = true;
wmb();
}
wait_for_completion(&ctx->vsync_comp);
+
+ if (!ctx->timegen_en) {
+ ctx->timegen_en = true;
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_ON, NULL);
+ WARN(rc, "intf %d timegen on error (%d)\n", ctl->intf_num, rc);
+ }
if (!ctx->vsync_handler)
mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
mutex_unlock(&ctx->vsync_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index c9acc65..a1f1bcc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -245,7 +245,7 @@
ctx->format = rot->format;
- ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+ ctx->rot90 = !!(rot->flags & MDP_ROT_90);
if (ctx->rot90) {
ctx->opmode |= BIT(5); /* ROT 90 */
swap(ctx->width, ctx->height);
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 569e381..f537c39 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,8 @@
#define CHECK_BOUNDS(offset, size, max_size) \
(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+static atomic_t ov_active_panels = ATOMIC_INIT(0);
+
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
@@ -196,7 +198,9 @@
return -EINVAL;
}
- rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+ /* keep only flags of interest to rotator */
+ rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
+ MDP_SECURE_OVERLAY_SESSION);
rot->format = fmt->format;
rot->img_width = req->src.width;
@@ -207,7 +211,7 @@
rot->src_rect.h = req->src_rect.h;
if (req->flags & MDP_DEINTERLACE) {
- rot->rotations |= MDP_DEINTERLACE;
+ rot->flags |= MDP_DEINTERLACE;
rot->src_rect.h /= 2;
}
@@ -356,8 +360,14 @@
{
int ret;
- if (!mfd->panel_power_on)
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
return -EPERM;
+ }
if (req->flags & MDSS_MDP_ROT_ONLY) {
ret = mdss_mdp_overlay_rotator_setup(mfd, req);
@@ -372,18 +382,22 @@
req->z_order -= MDSS_MDP_STAGE_0;
}
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
static inline int mdss_mdp_overlay_get_buf(struct msm_fb_data_type *mfd,
struct mdss_mdp_data *data,
struct msmfb_data *planes,
- int num_planes)
+ int num_planes,
+ u32 flags)
{
int i;
memset(data, 0, sizeof(*data));
for (i = 0; i < num_planes; i++) {
+ data->p[i].flags = flags;
mdss_mdp_get_img(&planes[i], &data->p[i]);
if (data->p[0].len == 0)
break;
@@ -411,35 +425,18 @@
return 0;
}
-static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe, *tmp;
- struct msm_fb_data_type *mfd = ctl->mfd;
- int i, ret;
+ LIST_HEAD(destroy_pipes);
+ int i;
- if (mfd->kickoff_fnc)
- ret = mfd->kickoff_fnc(ctl);
- else
- ret = mdss_mdp_display_commit(ctl, NULL);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- complete(&mfd->update.comp);
- mutex_lock(&mfd->no_update.lock);
- if (mfd->no_update.timer.function)
- del_timer(&(mfd->no_update.timer));
-
- mfd->no_update.timer.expires = jiffies + (2 * HZ);
- add_timer(&mfd->no_update.timer);
- mutex_unlock(&mfd->no_update.lock);
-
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
- list_del(&pipe->cleanup_list);
+ list_move(&pipe->cleanup_list, &destroy_pipes);
for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
-
- mdss_mdp_pipe_destroy(pipe);
}
if (!list_empty(&mfd->pipes_used)) {
@@ -458,36 +455,44 @@
}
}
mutex_unlock(&mfd->lock);
+ list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+ mdss_mdp_pipe_destroy(pipe);
+ mutex_unlock(&mfd->ov_lock);
+
+ return 0;
+}
+
+static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+{
+ struct msm_fb_data_type *mfd = ctl->mfd;
+ int ret;
+
+ if (mfd->kickoff_fnc)
+ ret = mfd->kickoff_fnc(ctl);
+ else
+ ret = mdss_mdp_display_commit(ctl, NULL);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ complete(&mfd->update.comp);
+ mutex_lock(&mfd->no_update.lock);
+ if (mfd->no_update.timer.function)
+ del_timer(&(mfd->no_update.timer));
+
+ mfd->no_update.timer.expires = jiffies + (2 * HZ);
+ add_timer(&mfd->no_update.timer);
+ mutex_unlock(&mfd->no_update.lock);
+
+ ret = mdss_mdp_overlay_cleanup(mfd);
return ret;
}
-static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
{
struct mdss_mdp_pipe *pipe;
- int i, ret = 0;
u32 pipe_ndx, unset_ndx = 0;
-
- if (!mfd || !mfd->ctl)
- return -ENODEV;
-
- pr_debug("unset ndx=%x\n", ndx);
-
- if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(ndx);
- if (rot) {
- mdss_mdp_rotator_finish(rot);
- } else {
- pr_warn("unknown session id=%x\n", ndx);
- ret = -ENODEV;
- }
-
- return ret;
- }
-
- if (!mfd->ctl->power_on)
- return 0;
+ int i;
for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
pipe_ndx = BIT(i);
@@ -505,37 +510,59 @@
mdss_mdp_mixer_pipe_unstage(pipe);
}
}
+ return 0;
+}
+
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+ int ret = 0;
+
+ if (!mfd || !mfd->ctl)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ pr_debug("unset ndx=%x\n", ndx);
+
+ if (ndx & MDSS_MDP_ROT_SESSION_MASK)
+ ret = mdss_mdp_rotator_release(ndx);
+ else
+ ret = mdss_mdp_overlay_release(mfd, ndx);
+
+ mutex_unlock(&mfd->ov_lock);
return ret;
}
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+static int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
u32 unset_ndx = 0;
int cnt = 0;
+ mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
- if (!list_empty(&mfd->pipes_used)) {
- list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- if (pipe->ndx & MDSS_MDP_ROT_SESSION_MASK) {
- struct mdss_mdp_rotator_session *rot;
- rot = mdss_mdp_rotator_session_get(pipe->ndx);
- if (rot)
- mdss_mdp_rotator_finish(rot);
- } else {
- unset_ndx |= pipe->ndx;
- cnt++;
- }
- }
+ list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+ unset_ndx |= pipe->ndx;
+ cnt++;
}
mutex_unlock(&mfd->lock);
if (unset_ndx) {
pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
- mdss_mdp_overlay_unset(mfd, unset_ndx);
- mdss_mdp_overlay_kickoff(mfd->ctl);
+ mdss_mdp_overlay_release(mfd, unset_ndx);
}
+ mutex_unlock(&mfd->ov_lock);
+
+ if (cnt)
+ mdss_mdp_overlay_kickoff(mfd->ctl);
return 0;
}
@@ -561,26 +588,28 @@
struct mdss_mdp_rotator_session *rot;
struct mdss_mdp_data src_data, dst_data;
int ret;
+ u32 flgs;
- ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1);
+ rot = mdss_mdp_rotator_session_get(req->id);
+ if (!rot) {
+ pr_err("invalid session id=%x\n", req->id);
+ return -ENOENT;
+ }
+
+ flgs = rot->flags & MDP_SECURE_OVERLAY_SESSION;
+
+ ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1, flgs);
if (ret) {
pr_err("src_data pmem error\n");
goto rotate_done;
}
- ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1);
+ ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1, flgs);
if (ret) {
pr_err("dst_data pmem error\n");
goto rotate_done;
}
- rot = mdss_mdp_rotator_session_get(req->id);
- if (!rot) {
- pr_err("invalid session id=%x\n", req->id);
- ret = -ENODEV;
- goto rotate_done;
- }
-
ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
if (ret) {
pr_err("rotator queue error session id=%x\n", req->id);
@@ -601,6 +630,7 @@
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_data *src_data;
int ret, buf_ndx;
+ u32 flags;
pipe = mdss_mdp_pipe_get_locked(req->id);
if (pipe == NULL) {
@@ -610,11 +640,13 @@
pr_debug("ov queue pnum=%d\n", pipe->num);
+ flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
+
buf_ndx = (pipe->play_cnt + 1) & 1; /* next buffer */
src_data = &pipe->buffers[buf_ndx];
mdss_mdp_overlay_free_buf(src_data);
- ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1);
+ ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret)) {
pr_err("src_data pmem error\n");
} else {
@@ -625,9 +657,6 @@
ctl = pipe->mixer->ctl;
mdss_mdp_pipe_unlock(pipe);
- if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL))
- ret = mdss_mdp_overlay_kickoff(ctl);
-
return ret;
}
@@ -638,14 +667,29 @@
pr_debug("play req id=%x\n", req->id);
- if (!mfd->panel_power_on)
- return -EPERM;
+ ret = mutex_lock_interruptible(&mfd->ov_lock);
+ if (ret)
+ return ret;
- if (req->id & MDSS_MDP_ROT_SESSION_MASK)
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return -EPERM;
+ }
+
+ if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
ret = mdss_mdp_overlay_rotate(mfd, req);
- else
+ } else {
ret = mdss_mdp_overlay_queue(mfd, req);
+ if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL)) {
+ mutex_unlock(&mfd->ov_lock);
+ ret = mdss_mdp_overlay_kickoff(mfd->ctl);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -719,10 +763,7 @@
u32 offset;
int bpp, ret;
- if (!mfd)
- return;
-
- if (!mfd->ctl || !mfd->panel_power_on)
+ if (!mfd || !mfd->ctl)
return;
fbi = mfd->fbi;
@@ -732,6 +773,14 @@
return;
}
+ if (mutex_lock_interruptible(&mfd->ov_lock))
+ return;
+
+ if (!mfd->panel_power_on) {
+ mutex_unlock(&mfd->ov_lock);
+ return;
+ }
+
memset(&data, 0, sizeof(data));
bpp = fbi->var.bits_per_pixel / 8;
@@ -782,6 +831,7 @@
return;
}
}
+ mutex_unlock(&mfd->ov_lock);
if (fbi->var.activate & FB_ACTIVATE_VBL)
mdss_mdp_overlay_kickoff(mfd->ctl);
@@ -853,9 +903,9 @@
}
ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
- mdss_get_iommu_domain(), 0,
- MDSS_MDP_CURSOR_SIZE, SZ_4K,
- 0, &(mfd->cursor_buf_iova));
+ mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
+ 0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
+ &(mfd->cursor_buf_iova));
if (IS_ERR_VALUE(ret)) {
dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
mfd->cursor_buf,
@@ -1076,10 +1126,36 @@
return ret;
}
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ rc = mdss_mdp_ctl_on(mfd);
+ if (rc == 0)
+ atomic_inc(&ov_active_panels);
+
+ return rc;
+}
+
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+ int rc;
+
+ mdss_mdp_overlay_release_all(mfd);
+
+ rc = mdss_mdp_ctl_off(mfd);
+ if (rc == 0) {
+ if (atomic_dec_return(&ov_active_panels) == 0)
+ mdss_mdp_rotator_release_all();
+ }
+
+ return rc;
+}
+
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
- mfd->on_fnc = mdss_mdp_ctl_on;
- mfd->off_fnc = mdss_mdp_ctl_off;
+ mfd->on_fnc = mdss_mdp_overlay_on;
+ mfd->off_fnc = mdss_mdp_overlay_off;
mfd->hw_refresh = true;
mfd->do_histogram = NULL;
mfd->overlay_play_enable = true;
@@ -1092,6 +1168,7 @@
INIT_LIST_HEAD(&mfd->pipes_used);
INIT_LIST_HEAD(&mfd->pipes_cleanup);
+ mutex_init(&mfd->ov_lock);
return 0;
}
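Every overlay entry point (set, unset, play, pan display) now shares the same prologue: take the new mfd->ov_lock interruptibly, then verify the panel is still powered before touching pipes. The guard, as a standalone sketch:

static int example_overlay_op(struct msm_fb_data_type *mfd)
{
	int ret = mutex_lock_interruptible(&mfd->ov_lock);

	if (ret)
		return ret;	/* interrupted by a signal */

	if (!mfd->panel_power_on) {
		mutex_unlock(&mfd->ov_lock);
		return -EPERM;
	}

	/* ... pipe setup / queue / cleanup work ... */

	mutex_unlock(&mfd->ov_lock);
	return 0;
}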
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 3b04633..459cf14 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -268,9 +268,7 @@
atomic_read(&pipe->ref_cnt));
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- mutex_lock(&mdss_mdp_sspp_lock);
mdss_mdp_pipe_free(pipe);
- mutex_unlock(&mdss_mdp_sspp_lock);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return 0;
@@ -522,9 +520,13 @@
{
struct mdss_mdp_format_params *fmt;
u32 opmode, chroma_samp, unpack, src_format;
+ u32 secure = 0;
fmt = pipe->src_fmt;
+ if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ secure = 0xF;
+
opmode = pipe->bwc_mode;
if (pipe->flags & MDP_FLIP_LR)
opmode |= MDSS_MDP_OP_FLIP_LR;
@@ -571,6 +573,7 @@
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index dc1cb0d..1e58269 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,12 +187,17 @@
{
struct mdss_mdp_pipe *rot_pipe;
struct mdss_mdp_ctl *ctl;
- int ret;
+ int ret, need_wait = false;
- if (!rot)
+ ret = mutex_lock_interruptible(&rotator_lock);
+ if (ret)
+ return ret;
+
+ if (!rot || !rot->ref_cnt) {
+ mutex_unlock(&rotator_lock);
return -ENODEV;
+ }
- mutex_lock(&rotator_lock);
ret = mdss_mdp_rotator_pipe_dequeue(rot);
if (ret) {
pr_err("unable to acquire rotator\n");
@@ -207,7 +212,7 @@
if (rot->params_changed) {
rot->params_changed = 0;
- rot_pipe->flags = rot->rotations;
+ rot_pipe->flags = rot->flags;
rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
rot_pipe->img_width = rot->img_width;
rot_pipe->img_height = rot->img_height;
@@ -225,16 +230,18 @@
ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
+ if (ret == 0 && !rot->no_wait)
+ need_wait = true;
done:
mutex_unlock(&rotator_lock);
- if (!rot->no_wait)
+ if (need_wait)
mdss_mdp_rotator_busy_wait(rot);
return ret;
}
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
{
struct mdss_mdp_pipe *rot_pipe;
@@ -243,7 +250,6 @@
pr_debug("finish rot id=%x\n", rot->session_id);
- mutex_lock(&rotator_lock);
rot_pipe = rot->pipe;
if (rot_pipe) {
mdss_mdp_rotator_busy_wait(rot);
@@ -255,7 +261,43 @@
mdss_mdp_pipe_destroy(rot_pipe);
mdss_mdp_wb_mixer_destroy(mixer);
}
+
+ return 0;
+}
+
+int mdss_mdp_rotator_release(u32 ndx)
+{
+ struct mdss_mdp_rotator_session *rot;
+ mutex_lock(&rotator_lock);
+ rot = mdss_mdp_rotator_session_get(ndx);
+ if (rot) {
+ mdss_mdp_rotator_finish(rot);
+ } else {
+ pr_warn("unknown session id=%x\n", ndx);
+ return -ENOENT;
+ }
mutex_unlock(&rotator_lock);
return 0;
}
+
+int mdss_mdp_rotator_release_all(void)
+{
+ struct mdss_mdp_rotator_session *rot;
+ int i, cnt;
+
+ mutex_lock(&rotator_lock);
+ for (i = 0, cnt = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+ rot = &rotator_session[i];
+ if (rot->ref_cnt) {
+ mdss_mdp_rotator_finish(rot);
+ cnt++;
+ }
+ }
+ mutex_unlock(&rotator_lock);
+
+ if (cnt)
+ pr_debug("cleaned up %d rotator sessions\n", cnt);
+
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index eb5b47a..70ef6bf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
u32 params_changed;
u32 format;
- u32 rotations;
+ u32 flags;
u16 img_width;
u16 img_height;
@@ -48,7 +48,8 @@
int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
-int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release_all(void);
#endif /* MDSS_MDP_ROTATOR_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index ee9582a..9f2df85 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -316,9 +316,20 @@
} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr);
- if (is_mdss_iommu_attached())
+ if (is_mdss_iommu_attached()) {
+ int domain;
+ if (data->flags & MDP_SECURE_OVERLAY_SESSION)
+ domain = MDSS_IOMMU_DOMAIN_SECURE;
+ else
+ domain = MDSS_IOMMU_DOMAIN_UNSECURE;
ion_unmap_iommu(iclient, data->srcp_ihdl,
- mdss_get_iommu_domain(), 0);
+ mdss_get_iommu_domain(domain), 0);
+
+ if (domain == MDSS_IOMMU_DOMAIN_SECURE) {
+ msm_ion_unsecure_buffer(iclient,
+ data->srcp_ihdl);
+ }
+ }
ion_free(iclient, data->srcp_ihdl);
data->srcp_ihdl = NULL;
@@ -339,7 +350,7 @@
start = (unsigned long *) &data->addr;
len = (unsigned long *) &data->len;
- data->flags = img->flags;
+ data->flags |= img->flags;
data->p_need = 0;
if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -374,8 +385,24 @@
}
if (is_mdss_iommu_attached()) {
+ int domain;
+ if (data->flags & MDP_SECURE_OVERLAY_SESSION) {
+ domain = MDSS_IOMMU_DOMAIN_SECURE;
+ ret = msm_ion_secure_buffer(iclient,
+ data->srcp_ihdl, 0x2,
+ ION_UNSECURE_DELAYED);
+ if (IS_ERR_VALUE(ret)) {
+ ion_free(iclient, data->srcp_ihdl);
+ pr_err("failed to secure handle (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+ }
+
ret = ion_map_iommu(iclient, data->srcp_ihdl,
- mdss_get_iommu_domain(),
+ mdss_get_iommu_domain(domain),
0, SZ_4K, 0, start, len, 0,
ION_IOMMU_UNMAP_DELAYED);
+			} else {
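Buffers flagged MDP_SECURE_OVERLAY_SESSION are first secured through msm_ion_secure_buffer() and then mapped into the secure domain; the unmap hunk above mirrors this with ion_unmap_iommu() plus msm_ion_unsecure_buffer(). The selection logic on the map side, condensed from the hunk above:

static int example_map_buf(struct ion_client *iclient,
			   struct ion_handle *ihdl, u32 flags,
			   unsigned long *start, unsigned long *len)
{
	int domain, ret;

	if (flags & MDP_SECURE_OVERLAY_SESSION) {
		domain = MDSS_IOMMU_DOMAIN_SECURE;
		ret = msm_ion_secure_buffer(iclient, ihdl, 0x2,
					    ION_UNSECURE_DELAYED);
		if (IS_ERR_VALUE(ret))
			return ret;
	} else {
		domain = MDSS_IOMMU_DOMAIN_UNSECURE;
	}

	return ion_map_iommu(iclient, ihdl, mdss_get_iommu_domain(domain),
			     0, SZ_4K, 0, start, len, 0,
			     ION_IOMMU_UNMAP_DELAYED);
}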
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index b18efbe..b74523b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -96,8 +96,9 @@
ion_phys(iclient, ihdl, &mdss_wb_mem, &img_size);
if (is_mdss_iommu_attached()) {
+ int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
rc = ion_map_iommu(iclient, ihdl,
- mdss_get_iommu_domain(),
+ mdss_get_iommu_domain(domain),
0, SZ_4K, 0,
(unsigned long *) &img->addr,
(unsigned long *) &img->len,
@@ -569,6 +570,6 @@
int msm_fb_get_iommu_domain(void)
{
- return mdss_get_iommu_domain();
+ return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
}
EXPORT_SYMBOL(msm_fb_get_iommu_domain);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 5cdfe34..28d7051 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -55,6 +55,17 @@
MAX_PHYS_TARGET_NUM,
};
+enum mdss_intf_events {
+ MDSS_EVENT_RESET,
+ MDSS_EVENT_UNBLANK,
+ MDSS_EVENT_TIMEGEN_ON,
+ MDSS_EVENT_BLANK,
+ MDSS_EVENT_TIMEGEN_OFF,
+ MDSS_EVENT_CLOSE,
+ MDSS_EVENT_SUSPEND,
+ MDSS_EVENT_RESUME,
+};
+
/* panel info type */
struct lcd_panel_info {
u32 vsync_enable;
@@ -178,14 +189,11 @@
struct mdss_panel_data {
struct mdss_panel_info panel_info;
- void (*set_backlight) (struct mdss_panel_data *pdata,
- u32 bl_level);
- int (*intf_unprepare) (struct mdss_panel_data *pdata);
+ void (*set_backlight) (struct mdss_panel_data *pdata, u32 bl_level);
unsigned char *mmss_cc_base;
/* function entry chain */
- int (*on) (struct mdss_panel_data *pdata);
- int (*off) (struct mdss_panel_data *pdata);
+ int (*event_handler) (struct mdss_panel_data *pdata, int e, void *arg);
};
int mdss_register_panel(struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index d4c924f..c3dc06b 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,15 +25,10 @@
#include "mdss_panel.h"
-static int mdss_wb_on(struct mdss_panel_data *pdata)
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
{
- pr_debug("%s\n", __func__);
- return 0;
-}
-
-static int mdss_wb_off(struct mdss_panel_data *pdata)
-{
- pr_debug("%s\n", __func__);
+ pr_debug("%s: event=%d\n", __func__, event);
return 0;
}
@@ -75,8 +70,7 @@
pdata->panel_info.pdest = DISPLAY_3;
pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
- pdata->on = mdss_wb_on;
- pdata->off = mdss_wb_off;
+ pdata->event_handler = mdss_wb_event_handler;
pdev->dev.platform_data = pdata;
rc = mdss_register_panel(pdata);
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5189b6d..e6e9624 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -2918,17 +2918,9 @@
return ret;
}
-static int msmfb_overlay_commit(struct fb_info *info, unsigned long *argp)
+static int msmfb_overlay_commit(struct fb_info *info)
{
- int ret, ndx;
-
- ret = copy_from_user(&ndx, argp, sizeof(ndx));
- if (ret) {
- pr_err("%s: ioctl failed\n", __func__);
- return ret;
- }
-
- return mdp4_overlay_commit(info, ndx);
+ return mdp4_overlay_commit(info);
}
static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
@@ -3362,7 +3354,7 @@
break;
case MSMFB_OVERLAY_COMMIT:
down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_commit(info, argp);
+ ret = msmfb_overlay_commit(info);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_PLAY:
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index a9709fb..b84ae44 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -784,7 +784,11 @@
buf_pool->allocated--;
}
- memset(buf_entry, 0, sizeof(struct vcd_buffer_entry));
+ buf_entry->valid = buf_entry->allocated = buf_entry->in_use = 0;
+ buf_entry->alloc = buf_entry->virtual = buf_entry->physical = NULL;
+ buf_entry->sz = 0;
+ memset(&buf_entry->frame, 0, sizeof(struct vcd_frame_data));
+
buf_pool->validated--;
if (buf_pool->validated == 0)
vcd_free_buffer_pool_entries(buf_pool);
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
new file mode 100644
index 0000000..613cd9f
--- /dev/null
+++ b/include/linux/msm_ipa.h
@@ -0,0 +1,714 @@
+#ifndef _MSM_IPA_H_
+#define _MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_NAT_DMA 24
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_GET_ASYNC_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_MAX 29
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 20
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS (1ul << 0)
+#define IPA_FLT_PROTOCOL (1ul << 1)
+#define IPA_FLT_SRC_ADDR (1ul << 2)
+#define IPA_FLT_DST_ADDR (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE (1ul << 6)
+#define IPA_FLT_CODE (1ul << 7)
+#define IPA_FLT_SPI (1ul << 8)
+#define IPA_FLT_SRC_PORT (1ul << 9)
+#define IPA_FLT_DST_PORT (1ul << 10)
+#define IPA_FLT_TC (1ul << 11)
+#define IPA_FLT_FLOW_LABEL (1ul << 12)
+#define IPA_FLT_NEXT_HDR (1ul << 13)
+#define IPA_FLT_META_DATA (1ul << 14)
+#define IPA_FLT_FRAGMENT (1ul << 15)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, for e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+ IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+ IPA_CLIENT_HSIC2_PROD,
+ IPA_CLIENT_HSIC3_PROD,
+ IPA_CLIENT_HSIC4_PROD,
+ IPA_CLIENT_HSIC5_PROD,
+ IPA_CLIENT_USB_PROD,
+ IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+ IPA_CLIENT_A2_EMBEDDED_PROD,
+ IPA_CLIENT_A2_TETHERED_PROD,
+ IPA_CLIENT_A5_LAN_WAN_PROD,
+ IPA_CLIENT_A5_CMD_PROD,
+ IPA_CLIENT_Q6_LAN_PROD,
+
+ IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+ IPA_CLIENT_HSIC2_CONS,
+ IPA_CLIENT_HSIC3_CONS,
+ IPA_CLIENT_HSIC4_CONS,
+ IPA_CLIENT_HSIC5_CONS,
+ IPA_CLIENT_USB_CONS,
+ IPA_CLIENT_A2_EMBEDDED_CONS,
+ IPA_CLIENT_A2_TETHERED_CONS,
+ IPA_CLIENT_A5_LAN_WAN_CONS,
+ IPA_CLIENT_Q6_LAN_CONS,
+
+ IPA_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+ IPA_IP_v4,
+ IPA_IP_v6,
+ IPA_IP_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., A5): 5'd3
+ */
+enum ipa_flt_action {
+ IPA_PASS_TO_ROUTING,
+ IPA_PASS_TO_SRC_NAT,
+ IPA_PASS_TO_DST_NAT,
+ IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+ uint32_t attrib_mask;
+ uint16_t src_port_lo;
+ uint16_t src_port_hi;
+ uint16_t dst_port_lo;
+ uint16_t dst_port_hi;
+ uint8_t type;
+ uint8_t code;
+ uint32_t spi;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint32_t meta_data;
+ uint32_t meta_data_mask;
+ union {
+ struct {
+ uint8_t tos;
+ uint8_t protocol;
+ uint32_t src_addr;
+ uint32_t src_addr_mask;
+ uint32_t dst_addr;
+ uint32_t dst_addr_mask;
+ } v4;
+ struct {
+ uint8_t tc;
+ uint32_t flow_label;
+ uint8_t next_hdr;
+ uint32_t src_addr[4];
+ uint32_t src_addr_mask[4];
+ uint32_t dst_addr[4];
+ uint32_t dst_addr_mask[4];
+ } v6;
+ } u;
+};
+
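The attrib_mask field is an OR of the IPA_FLT_* bits defined above, and only the fields it selects are consulted. A minimal user-space sketch of building a match (assuming this header is installed as <linux/msm_ipa.h>; all match values are placeholders):

    #include <string.h>
    #include <linux/msm_ipa.h>      /* assumed install path for this header */

    static void build_udp_src_range_match(struct ipa_rule_attrib *attrib)
    {
        memset(attrib, 0, sizeof(*attrib));
        /* compare only the protocol and the source port range */
        attrib->attrib_mask = IPA_FLT_PROTOCOL | IPA_FLT_SRC_PORT_RANGE;
        attrib->u.v4.protocol = 17;     /* IPPROTO_UDP */
        attrib->src_port_lo = 5000;     /* match src ports 5000..5010 */
        attrib->src_port_hi = 5010;
    }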
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ */
+struct ipa_flt_rule {
+ enum ipa_flt_action action;
+ uint32_t rt_tbl_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ * it is not an index or an offset
+ * @attrib: attributes of the rule
+ */
+struct ipa_rt_rule {
+ enum ipa_client_type dst;
+ uint32_t hdr_hdl;
+ struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status: out parameter, status of header add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_add {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+ uint32_t hdr_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (supports
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr: all headers need to go here back to
+ * back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+ uint8_t commit;
+ uint8_t num_hdrs;
+ struct ipa_hdr_add hdr[0];
+};
+
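Because hdr[] is a zero-length trailing array, the ioctl payload must be one contiguous allocation sized for num_hdrs back-to-back entries. A sketch of the allocation pattern (hypothetical helper, error handling elided):

    #include <stdint.h>
    #include <stdlib.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static struct ipa_ioc_add_hdr *alloc_add_hdr_cmd(uint8_t num)
    {
        /* fixed part plus 'num' ipa_hdr_add entries, zeroed */
        struct ipa_ioc_add_hdr *cmd =
            calloc(1, sizeof(*cmd) + num * sizeof(struct ipa_hdr_add));

        if (cmd) {
            cmd->commit = 1;        /* also write to IPA HW */
            cmd->num_hdrs = num;
        }
        return cmd;                 /* caller fills cmd->hdr[0..num-1] */
    }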
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr: out parameter, contents of specified header,
+ * valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header,
+ * valid only when ioctl return val is non-negative
+ * @is_partial: out parameter, indicates whether specified header is partial
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_copy_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t hdr[IPA_HDR_MAX_SIZE];
+ uint8_t hdr_len;
+ uint8_t is_partial;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters; if the lookup is
+ * successful, the caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl: out parameter, handle of header entry
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status: out parameter, status of header remove operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_hdr_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+ uint8_t commit;
+ uint8_t num_hdls;
+ struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table; it is NOT possible to add rules at
+ * the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_add {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit)
+ *
+ * all rules MUST be added to the same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add rules[0];
+};
+
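Putting the pieces together, a hedged sketch of installing one IPv4 routing rule that steers UDP traffic to the USB consumer pipe; the table name, match values, and ipa_fd (an open /dev/ipa descriptor) are assumptions:

    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static int add_usb_route(int ipa_fd)
    {
        struct ipa_ioc_add_rt_rule *cmd;
        int ret = -1;

        cmd = calloc(1, sizeof(*cmd) + sizeof(struct ipa_rt_rule_add));
        if (!cmd)
            return -1;
        cmd->commit = 1;
        cmd->ip = IPA_IP_v4;
        strncpy(cmd->rt_tbl_name, "rt_usb", IPA_RESOURCE_NAME_MAX - 1);
        cmd->num_rules = 1;
        cmd->rules[0].at_rear = 1;
        cmd->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
        cmd->rules[0].rule.attrib.attrib_mask = IPA_FLT_PROTOCOL;
        cmd->rules[0].rule.attrib.u.v4.protocol = 17;   /* UDP */

        /* the per-rule status must be checked in addition to the ioctl rc */
        if (ioctl(ipa_fd, IPA_IOC_ADD_RT_RULE, cmd) >= 0 &&
            cmd->rules[0].status == 0)
            ret = 0;    /* cmd->rules[0].rt_rule_hdl is now valid */
        free(cmd);
        return ret;
    }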
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status: output parameter, status of route rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of filtering rule add operation,
+ * 0 for success,
+ * -1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+ struct ipa_flt_rule rule;
+ uint8_t at_rear;
+ uint32_t flt_rule_hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to the same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep: which "client's" pipe does this rule apply to?
+ * valid only when global is 0
+ * @global: does this apply to the global filter table of the specified IP
+ * family?
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ enum ipa_client_type ep;
+ uint8_t global;
+ uint8_t num_rules;
+ struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status: output parameter, status of filtering rule delete operation,
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_flt_rule_del {
+ uint32_t hdl;
+ int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ uint8_t num_hdls;
+ struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters; if the lookup
+ * is successful, the caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl: output parameter, handle of routing table, valid only when ioctl
+ * return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+ enum ipa_ip_type ip;
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t hdl;
+};
+
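The lookup takes a reference on the table, so every successful IPA_IOC_GET_RT_TBL must eventually be balanced with IPA_IOC_PUT_RT_TBL. A sketch (the table name is a placeholder, and passing the handle as the ioctl argument value is an assumption about the driver's copy semantics):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static int lookup_then_release(int ipa_fd)
    {
        struct ipa_ioc_get_rt_tbl lookup;

        memset(&lookup, 0, sizeof(lookup));
        lookup.ip = IPA_IP_v4;
        strncpy(lookup.name, "rt_usb", IPA_RESOURCE_NAME_MAX - 1);
        if (ioctl(ipa_fd, IPA_IOC_GET_RT_TBL, &lookup) < 0)
            return -1;

        /* lookup.hdl can now be used, e.g. as ipa_flt_rule.rt_tbl_hdl */

        return ioctl(ipa_fd, IPA_IOC_PUT_RT_TBL, lookup.hdl);
    }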
+/**
+ * struct ipa_ioc_query_intf - used to look up the number of tx and
+ * rx properties of an interface
+ * @name: name of interface
+ * @num_tx_props: output parameter, number of tx properties
+ * valid only when ioctl return val is non-negative
+ * @num_rx_props: output parameter, number of rx properties
+ * valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_query_intf {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint32_t num_tx_props;
+ uint32_t num_rx_props;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type dst_pipe;
+ char hdr_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @tx: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ */
+struct ipa_ioc_rx_intf_prop {
+ enum ipa_ip_type ip;
+ struct ipa_rule_attrib attrib;
+ enum ipa_client_type src_pipe;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+ char name[IPA_RESOURCE_NAME_MAX];
+ struct ipa_ioc_rx_intf_prop rx[0];
+};
+
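Both property queries follow a two-step protocol: IPA_IOC_QUERY_INTF first reports the counts, then the caller sizes a buffer for the trailing flexible array and repeats the interface name in the second call. A sketch for the tx side (hypothetical helper):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static struct ipa_ioc_query_intf_tx_props *query_tx_props(int ipa_fd,
                                                              const char *ifname)
    {
        struct ipa_ioc_query_intf q;
        struct ipa_ioc_query_intf_tx_props *tx;

        memset(&q, 0, sizeof(q));
        strncpy(q.name, ifname, IPA_RESOURCE_NAME_MAX - 1);
        if (ioctl(ipa_fd, IPA_IOC_QUERY_INTF, &q) < 0)
            return NULL;

        /* fixed part plus num_tx_props back-to-back entries */
        tx = calloc(1, sizeof(*tx) +
                       q.num_tx_props * sizeof(struct ipa_ioc_tx_intf_prop));
        if (!tx)
            return NULL;
        strncpy(tx->name, ifname, IPA_RESOURCE_NAME_MAX - 1);
        if (ioctl(ipa_fd, IPA_IOC_QUERY_INTF_TX_PROPS, tx) < 0) {
            free(tx);
            return NULL;
        }
        return tx;      /* caller frees; tx->tx[0..num_tx_props-1] are valid */
    }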
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+ char dev_name[IPA_RESOURCE_NAME_MAX];
+ size_t size;
+ off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+ uint8_t tbl_index;
+ uint32_t ipv4_rules_offset;
+ uint32_t expn_rules_offset;
+
+ uint32_t index_offset;
+ uint32_t index_expn_offset;
+
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+ uint32_t ip_addr;
+};
+
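NAT bring-up chains the two structs: IPA_IOC_ALLOC_NAT_MEM reserves the table (reporting offset when system memory is used), after which IPA_IOC_V4_INIT_NAT describes the caller-chosen internal layout. A sketch; the device name, sizes, and all layout values are placeholders that in practice come from whatever NAT library formats the table:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static int nat_setup(int ipa_fd, uint32_t public_ip)
    {
        struct ipa_ioc_nat_alloc_mem mem;
        struct ipa_ioc_v4_nat_init init;

        memset(&mem, 0, sizeof(mem));
        strncpy(mem.dev_name, "ipaNatTable", IPA_RESOURCE_NAME_MAX - 1);
        mem.size = 4096;                /* placeholder table size */
        if (ioctl(ipa_fd, IPA_IOC_ALLOC_NAT_MEM, &mem) < 0)
            return -1;
        /* mem.offset: where the table begins within the mmap'ed region */

        memset(&init, 0, sizeof(init));
        init.tbl_index = 0;
        init.table_entries = 100;       /* placeholder layout values: the  */
        init.expn_table_entries = 50;   /* offsets describe how the caller */
        init.ipv4_rules_offset = 0;     /* carved up the allocated block   */
        init.expn_rules_offset = 0x800;
        init.index_offset = 0xc00;
        init.index_expn_offset = 0xe00;
        init.ip_addr = public_ip;
        return ioctl(ipa_fd, IPA_IOC_V4_INIT_NAT, &init);
    }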
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+ uint8_t table_index;
+ uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr: type of table, from which the base address of the table
+ * can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+ uint8_t table_index;
+ uint8_t base_addr;
+
+ uint32_t offset;
+ uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+ uint8_t entries;
+ struct ipa_ioc_nat_dma_one dma[0];
+
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @msg_len: the length of the message in bytes
+ * @rsvd: reserved bits for future use.
+ *
+ * A client in user-space should issue a read on the device (/dev/ipa) with a
+ * buffer of at least this size in a continuous loop; the call will block when
+ * there is no pending async message.
+ *
+ * After reading a message's meta-data using the above scheme, the client
+ * should issue a GET_ASYNC_MSG IOCTL to actually read the message itself into
+ * the buffer of "msg_len" bytes immediately following the ipa_msg_meta itself
+ * in the IOCTL payload.
+ */
+struct ipa_msg_meta {
+ uint8_t msg_type;
+ uint16_t msg_len;
+ uint8_t rsvd;
+};
+
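Taken together with IPA_IOC_GET_ASYNC_MSG below, the comment above implies a reader loop of roughly the following shape (a sketch; the assumption is that the ioctl payload is the meta-data immediately followed by msg_len message bytes):

    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    static void ipa_async_msg_loop(void)
    {
        struct ipa_msg_meta meta;
        int fd = open("/dev/ipa", O_RDONLY);

        if (fd < 0)
            return;
        for (;;) {
            /* blocks while no async message is pending */
            if (read(fd, &meta, sizeof(meta)) < (ssize_t)sizeof(meta))
                break;

            char *buf = calloc(1, sizeof(meta) + meta.msg_len);
            if (!buf)
                break;
            memcpy(buf, &meta, sizeof(meta));
            if (ioctl(fd, IPA_IOC_GET_ASYNC_MSG, buf) == 0) {
                /* message body: buf + sizeof(meta), meta.msg_len bytes */
            }
            free(buf);
        }
        close(fd);
    }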
+/**
+ * actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_RT, \
+ enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COMMIT_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RESET_FLT, \
+ enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_RT_TBL, \
+ uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PUT_HDR, \
+ uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_DMA, \
+ struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+ IPA_IOCTL_SET_FLT, \
+ uint32_t)
+#define IPA_IOC_GET_ASYNC_MSG _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_ASYNC_MSG, \
+ struct ipa_msg_meta *)
+
+#endif /* _MSM_IPA_H_ */
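Note that the commit/reset ioctls are declared with enum ipa_ip_type rather than a struct pointer, so the address family is the ioctl argument itself; a one-call sketch (by-value argument passing is an assumption about the driver):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>      /* assumed install path */

    /* flush pending IPv4 routing rules to hardware; ipa_fd is /dev/ipa */
    static void commit_v4_routes(int ipa_fd)
    {
        if (ioctl(ipa_fd, IPA_IOC_COMMIT_RT, IPA_IP_v4) < 0)
            perror("IPA_IOC_COMMIT_RT");
    }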
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 98050ce..d3f6792 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -71,7 +71,7 @@
#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
#define MSMFB_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
#define MSMFB_METADATA_SET _IOW(MSMFB_IOCTL_MAGIC, 162, struct msmfb_metadata)
-#define MSMFB_OVERLAY_COMMIT _IOW(MSMFB_IOCTL_MAGIC, 163, unsigned int)
+#define MSMFB_OVERLAY_COMMIT _IO(MSMFB_IOCTL_MAGIC, 163)
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 8f86fce..b1f534d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1811,6 +1811,28 @@
V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE = 1
};
#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE (V4L2_CID_MPEG_MSM_VIDC_BASE+24)
+#define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
+enum v4l2_mpeg_vidc_extradata {
+ V4L2_MPEG_VIDC_EXTRADATA_NONE,
+ V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION,
+ V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
+ V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP,
+ V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP,
+ V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP,
+ V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING,
+ V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE,
+ V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW,
+ V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI,
+ V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD,
+ V4L2_MPEG_VIDC_EXTRADATA_AFD_UD,
+ V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO,
+ V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB,
+ V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM,
+ V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+};
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/lib/Kconfig b/lib/Kconfig
index f1621d5..8437e36 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -387,4 +387,11 @@
The kernel drivers receive the QMI message over a transport
and then decode it into a C structure.
+config QMI_ENCDEC_DEBUG
+ bool
+ help
+ Kernel config option to enable debugging of the QMI encode/decode
+ library. When enabled, it logs information about each element and
+ message being encoded and decoded.
+
endmenu
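Since QMI_ENCDEC_DEBUG is a promptless bool, it cannot be toggled from menuconfig; it is only set when some other option selects it. A hypothetical consumer of the library would carry a fragment like:

    config MY_QMI_CLIENT
            tristate "Hypothetical QMI client driver"
            select QMI_ENCDEC_DEBUG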
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
index d759885..40273d0 100644
--- a/lib/qmi_encdec.c
+++ b/lib/qmi_encdec.c
@@ -25,6 +25,61 @@
#define TLV_LEN_SIZE sizeof(uint16_t)
#define TLV_TYPE_SIZE sizeof(uint8_t)
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+ const u8 *ptr = buf; \
+ int i, linelen, remaining = buf_len; \
+ int rowsize = 16, groupsize = 1; \
+ unsigned char linebuf[256]; \
+ for (i = 0; i < buf_len; i += rowsize) { \
+ linelen = min(remaining, rowsize); \
+ remaining -= linelen; \
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+ linebuf, sizeof(linebuf), false); \
+ pr_debug("%s: %s\n", prefix_str, linebuf); \
+ } \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { \
+ qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+ pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+ level, elem_len, elem_size); \
+ qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { \
+ pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+
+#endif
+
static int _qmi_kernel_encode(struct elem_info *ei_array,
void *out_buf, void *in_c_struct,
int enc_level);
@@ -232,6 +287,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_encode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+ temp_ei->elem_size, buf_src);
UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
encoded_bytes, tlv_len, encode_tlv, rc);
break;
@@ -253,6 +310,7 @@
if (encode_tlv && enc_level == 1) {
QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
tlv_pointer = buf_dst;
tlv_len = 0;
@@ -260,6 +318,7 @@
encode_tlv = 0;
}
}
+ QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
return encoded_bytes;
}
@@ -419,11 +478,13 @@
void *buf_src = in_buf;
int rc;
+ QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
while (decoded_bytes < in_buf_len) {
if (dec_level == 1) {
tlv_pointer = buf_src;
QMI_ENCDEC_DECODE_TLV(&tlv_type,
&tlv_len, tlv_pointer);
+ QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
temp_ei = find_ei(ei_array, tlv_type);
@@ -470,6 +531,8 @@
case QMI_SIGNED_4_BYTE_ENUM:
rc = qmi_decode_basic_elem(buf_dst, buf_src,
data_len_value, temp_ei->elem_size);
+ QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+ temp_ei->elem_size, buf_dst);
UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
break;
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 59e220d..76cd625 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -1702,6 +1702,19 @@
return 0;
}
+
+static int mdm9615_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ pr_debug("%s()\n", __func__);
+ rate->min = rate->max = 48000;
+
+ return 0;
+}
+
static int mdm9615_aux_pcm_get_gpios(void)
{
int ret = 0;
@@ -2134,6 +2147,43 @@
.be_hw_params_fixup = mdm9615_auxpcm_be_params_fixup,
.ops = &mdm9615_sec_auxpcm_be_ops,
},
+ /* Incall Music BACK END DAI Link */
+ {
+ .name = LPASS_BE_VOICE_PLAYBACK_TX,
+ .stream_name = "Voice Farend Playback",
+ .cpu_dai_name = "msm-dai-q6.32773",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Uplink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_TX,
+ .stream_name = "Voice Uplink Capture",
+ .cpu_dai_name = "msm-dai-q6.32772",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ },
+ /* Incall Record Downlink BACK END DAI Link */
+ {
+ .name = LPASS_BE_INCALL_RECORD_RX,
+ .stream_name = "Voice Downlink Capture",
+ .cpu_dai_name = "msm-dai-q6.32771",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+ .ignore_pmdown_time = 1, /* this dailink has playback support */
+ },
};
static struct snd_soc_dai_link mdm9615_dai_i2s_tabla[] = {
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 90c96b4..d0bfb76 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -925,7 +925,6 @@
clk_put(mi2s_bit_clk);
mi2s_bit_clk = NULL;
}
- msm_mi2s_free_gpios();
}
static int configure_mi2s_gpio(void)
@@ -958,7 +957,6 @@
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- configure_mi2s_gpio();
mi2s_bit_clk = clk_get(cpu_dai->dev, "bit_clk");
if (IS_ERR(mi2s_bit_clk))
return PTR_ERR(mi2s_bit_clk);
@@ -1138,7 +1136,6 @@
clk_put(sec_i2s_rx_osr_clk);
sec_i2s_rx_osr_clk = NULL;
}
- mpq8064_sec_i2s_rx_free_gpios();
}
pr_info("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -1177,7 +1174,6 @@
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- configure_sec_i2s_rx_gpio();
sec_i2s_rx_osr_clk = clk_get(cpu_dai->dev, "osr_clk");
if (IS_ERR(sec_i2s_rx_osr_clk)) {
pr_err("Failed to get sec_i2s_rx_osr_clk\n");
@@ -1695,7 +1691,8 @@
kfree(mbhc_cfg.calibration);
return ret;
}
-
+ configure_sec_i2s_rx_gpio();
+ configure_mi2s_gpio();
return ret;
}
@@ -1707,6 +1704,8 @@
pr_err("%s: Not the right machine type\n", __func__);
return ;
}
+ mpq8064_sec_i2s_rx_free_gpios();
+ msm_mi2s_free_gpios();
platform_device_unregister(msm_snd_device);
kfree(mbhc_cfg.calibration);
}
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 9e08be3..f151e51 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -30,7 +30,7 @@
#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
-static int enable_ocmem_audio_voice;
+static int enable_ocmem_audio_voice = 1;
module_param(enable_ocmem_audio_voice, int,
S_IRUGO | S_IWUSR | S_IWGRP);
MODULE_PARM_DESC(enable_ocmem_audio_voice, "control OCMEM usage for audio/voice");
@@ -423,10 +423,10 @@
struct voice_ocmem_workdata *workdata = NULL;
if (enable) {
- if (enable_ocmem_audio_voice)
- audio_ocmem_lcl.ocmem_en = true;
- else
+ if (!enable_ocmem_audio_voice)
audio_ocmem_lcl.ocmem_en = false;
+ else
+ audio_ocmem_lcl.ocmem_en = true;
}
if (audio_ocmem_lcl.ocmem_en) {
if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
@@ -527,10 +527,10 @@
struct audio_ocmem_workdata *workdata = NULL;
if (enable) {
- if (enable_ocmem_audio_voice)
- audio_ocmem_lcl.ocmem_en = true;
- else
+ if (!enable_ocmem_audio_voice)
audio_ocmem_lcl.ocmem_en = false;
+ else
+ audio_ocmem_lcl.ocmem_en = true;
}
if (audio_ocmem_lcl.ocmem_en) {
@@ -611,7 +611,7 @@
atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_DEFAULT);
atomic_set(&audio_ocmem_lcl.audio_exit, 0);
spin_lock_init(&audio_ocmem_lcl.audio_lock);
- audio_ocmem_lcl.ocmem_en = false;
+ audio_ocmem_lcl.ocmem_en = true;
/* populate platform data */
ret = audio_ocmem_platform_data_populate(pdev);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index fd340cf..0ddaabe 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2369,7 +2369,7 @@
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
if (buf_node->buf_addr_lsw == buf_add) {
- pr_info("%s: Found the element\n", __func__);
+ pr_debug("%s: Found the element\n", __func__);
mem_unmap.mem_map_handle = buf_node->mmap_hdl;
break;
}
@@ -2387,7 +2387,7 @@
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
if (!rc) {
- pr_err("timeout. waited for memory_map\n");
+ pr_err("timeout. waited for memory_unmap\n");
rc = -EINVAL;
goto fail_cmd;
}
@@ -2397,6 +2397,7 @@
if (buf_node->buf_addr_lsw == buf_add) {
list_del(&buf_node->list);
kfree(buf_node);
+ break;
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 7daf42a..b799e59 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
#include <mach/socinfo.h>
+#include <mach/qdsp6v2/apr_tal.h>
#include "sound/apr_audio-v2.h"
#include "sound/q6afe-v2.h"
@@ -208,6 +209,8 @@
static int voice_apr_register(void)
{
+ void *modem_mvm, *modem_cvs, *modem_cvp;
+
pr_debug("%s\n", __func__);
mutex_lock(&common.common_lock);
@@ -224,6 +227,18 @@
pr_err("%s: Unable to register MVM\n", __func__);
goto err;
}
+
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_mvm = apr_register("MODEM", "MVM",
+ qdsp_mvm_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_mvm == NULL)
+ pr_err("%s: Unable to register MVM for MODEM\n",
+ __func__);
}
if (common.apr_q6_cvs == NULL) {
@@ -238,6 +253,18 @@
goto err;
}
rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvs = apr_register("MODEM", "CVS",
+ qdsp_cvs_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvs == NULL)
+ pr_err("%s: Unable to register CVS for MODEM\n",
+ __func__);
+
}
if (common.apr_q6_cvp == NULL) {
@@ -252,6 +279,18 @@
goto err;
}
rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+ /*
+ * Register with modem for SSR callback. The APR handle
+ * is not stored since it is used only to receive notifications
+ * and not for communication
+ */
+ modem_cvp = apr_register("MODEM", "CVP",
+ qdsp_cvp_callback,
+ 0xFFFFFFFF, &common);
+ if (modem_cvp == NULL)
+ pr_err("%s: Unable to register CVP for MODEM\n",
+ __func__);
+
}
mutex_unlock(&common.common_lock);
@@ -606,8 +645,9 @@
cvs_handle = voice_get_cvs_handle(v);
/* MVM, CVS sessions are destroyed only for Full control sessions. */
- if (is_voip_session(v->session_id)) {
- pr_debug("%s: MVM detach stream\n", __func__);
+ if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__,
+ v->voc_state);
/* Detach voice stream. */
detach_stream.hdr.hdr_field =
@@ -3713,7 +3753,9 @@
mutex_lock(&v->lock);
- if (v->voc_state == VOC_RUN) {
+ if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
+
ret = voice_destroy_vocproc(v);
if (ret < 0)
pr_err("%s: destroy voice failed\n", __func__);
@@ -3738,6 +3780,13 @@
mutex_lock(&v->lock);
+ if (v->voc_state == VOC_ERROR) {
+ pr_debug("%s: VOC in ERR state\n", __func__);
+
+ voice_destroy_mvm_cvs_session(v);
+ v->voc_state = VOC_INIT;
+ }
+
if ((v->voc_state == VOC_INIT) ||
(v->voc_state == VOC_RELEASE)) {
ret = voice_apr_register();
@@ -3828,6 +3877,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3836,6 +3886,36 @@
c = priv;
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received MODEM reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+ apr_reset(c->apr_q6_mvm);
+ c->apr_q6_mvm = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].mvm_handle = 0;
+ }
+ return 0;
+ }
+
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
v = voice_get_session(data->dest_port);
@@ -3845,23 +3925,6 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_mvm);
- c->apr_q6_mvm = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].mvm_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
@@ -3946,6 +4009,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -3955,6 +4019,35 @@
c = priv;
pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvs);
+ c->apr_q6_cvs = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvs_handle = 0;
+ }
+ return 0;
+ }
v = voice_get_session(data->dest_port);
if (v == NULL) {
@@ -3963,23 +4056,6 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvs);
- c->apr_q6_cvs = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvs_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
@@ -4180,6 +4256,7 @@
struct common_data *c = NULL;
struct voice_data *v = NULL;
int i = 0;
+ uint16_t session_id = 0;
if ((data == NULL) || (priv == NULL)) {
pr_err("%s: data or priv is NULL\n", __func__);
@@ -4188,6 +4265,33 @@
c = priv;
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ v = voice_get_session(session_id);
+ if (v != NULL)
+ v->voc_state = VOC_ERROR;
+ } else {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvp);
+ c->apr_q6_cvp = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvp_handle = 0;
+ }
+ return 0;
+ }
+
v = voice_get_session(data->dest_port);
if (v == NULL) {
pr_err("%s: v is NULL\n", __func__);
@@ -4195,23 +4299,6 @@
return -EINVAL;
}
- pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
- data->payload_size, data->opcode);
-
- if (data->opcode == RESET_EVENTS) {
- pr_debug("%s: Reset event received in Voice service\n",
- __func__);
-
- apr_reset(c->apr_q6_cvp);
- c->apr_q6_cvp = NULL;
-
- /* Sub-system restart is applicable to all sessions. */
- for (i = 0; i < MAX_VOC_SESSIONS; i++)
- c->voice[i].cvp_handle = 0;
-
- return 0;
- }
-
if (data->opcode == APR_BASIC_RSP_RESULT) {
if (data->payload_size) {
ptr = data->payload;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 6f2824f..aef463f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@
VOC_RUN,
VOC_CHANGE,
VOC_RELEASE,
+ VOC_ERROR,
};
struct mem_buffer {