Merge "ARM: dts: msm: Add volume up key configuration for QRD845"
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 3b4436e..bdba526 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -11,6 +11,8 @@
* "qcom,scm-msm8660" for MSM8660 platforms
* "qcom,scm-msm8690" for MSM8690 platforms
* "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+	* "android,firmware" for the Android firmware image.
+	* "android,vbmeta" for setting system properties for verified boot.
- clocks: One to three clocks may be required based on compatible.
* Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
* Core, iface, and bus clocks required for "qcom,scm"
@@ -26,3 +28,26 @@
clock-names = "core", "bus", "iface";
};
};
+
+Example for SDM845:
+
+ firmware {
+ android {
+ compatible = "android,firmware";
+ vbmeta {
+ compatible = "android,vbmeta";
+ parts = "vbmeta,boot,system,vendor,dtbo";
+ };
+
+ fstab {
+ compatible = "android,fstab";
+ vendor {
+ compatible = "android,vendor";
+ dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+ type = "ext4";
+ mnt_flags = "ro,barrier=1,discard";
+ fsmgr_flags = "wait,slotselect,avb";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6405371..f8c8a69 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -160,6 +160,9 @@
"efuse_addr": EFUSE address to read and update analog tune parameter.
"emu_phy_base" : phy base address used for programming emulation target phy.
"ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+ "tcsr_clamp_dig_n" : To enable/disable digital clamp to the phy. When
+ de-asserted, it will prevent random leakage from qusb2 phy resulting from
+ out of sequence turn on/off of 1p8, 3p3 and DVDD regulators.
"refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register.
- clocks: a list of phandles to the PHY clocks. Use as per
Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -179,6 +182,8 @@
- qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0
- pinctrl-names/pinctrl-0/1: The GPIOs configured as output function. Names represents "active"
state when attached in host mode and "suspend" state when detached.
+ - qcom,tune2-efuse-correction: The correction to be applied to the efuse-read
+   TUNE2 value, to improve rise/fall times.
Example:
qusb_phy: qusb@f9b39000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a491bd7..a37e441 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,6 +24,7 @@
ams AMS AG
amstaos AMS-Taos Inc.
analogix Analogix Semiconductor, Inc.
+android Google
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
index 7819d26..e6dc45a 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
@@ -23,6 +23,9 @@
reg-names = "mc_virt-base", "mem_noc-base",
"system_noc-base", "ipa_virt-base";
+ mbox-names = "apps_rsc";
+ mboxes = <&apps_rsc 0>;
+
/*RSCs*/
rsc_apps: rsc-apps {
cell-id = <MSM_BUS_RSC_APPS>;
@@ -360,7 +363,7 @@
label = "mas-qhm-qpic";
qcom,buswidth = <4>;
qcom,agg-ports = <1>;
- qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+ qcom,connections = <&slv_qns_aggre_noc>;
qcom,bus-dev = <&fab_system_noc>;
qcom,bcms = <&bcm_pn3>;
};
@@ -451,7 +454,7 @@
qcom,buswidth = <8>;
qcom,agg-ports = <1>;
qcom,qport = <1>;
- qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+ qcom,connections = <&slv_qns_aggre_noc>;
qcom,bus-dev = <&fab_system_noc>;
qcom,bcms = <&bcm_ce>, <&bcm_pn5>;
qcom,ap-owned;
@@ -538,7 +541,7 @@
qcom,buswidth = <8>;
qcom,agg-ports = <1>;
qcom,qport = <8>;
- qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+ qcom,connections = <&slv_qns_aggre_noc>;
qcom,bus-dev = <&fab_system_noc>;
qcom,bcms = <&bcm_pn1>;
qcom,ap-owned;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
index 2148cc9..65467f9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
@@ -338,4 +338,39 @@
};
};
};
+
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmxpoorwills_vadc 0x4c>;
+ thermal-governor = "user_space";
+ };
+
+ pa-therm1-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmxpoorwills_vadc 0x4d>;
+ thermal-governor = "user_space";
+ };
+
+ pa-therm2-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmxpoorwills_vadc 0x4e>;
+ thermal-governor = "user_space";
+ };
+
+ mdm-case-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmxpoorwills_vadc 0x4f>;
+ thermal-governor = "user_space";
+ };
+
+ ambient-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmxpoorwills_vadc 0x52>;
+ thermal-governor = "user_space";
+ };
};
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 46eb60b..d0568aa 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -313,6 +313,8 @@
CONFIG_MSM_QMP=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_MSM_SMEM=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -329,6 +331,7 @@
CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_PM=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 4761bc5..8ca4247 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -43,7 +43,6 @@
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select MSM_CORTEX_A7
- select COMMON_CLK_MSM
select PINCTRL
select QCOM_SCM if SMP
select MSM_JTAG_MM if CORESIGHT_ETM
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6531949..96f43d6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -124,6 +124,7 @@
config ARCH_SDM845
bool "Enable Support for Qualcomm Technologies Inc. SDM845"
depends on ARCH_QCOM
+ select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
help
@@ -133,6 +134,7 @@
config ARCH_SDM670
bool "Enable Support for Qualcomm Technologies Inc. SDM670"
depends on ARCH_QCOM
+ select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
help
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 7705d01..1990b65 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -17,42 +17,42 @@
qcom,fastchg-current-ma = <3450>;
qcom,batt-id-kohm = <60>;
qcom,battery-beta = <3435>;
- qcom,battery-type = "ascent_3450mah_averaged_masterslave_jul11th2017";
- qcom,checksum = <0x7C33>;
+ qcom,battery-type = "ascent_3450mah_averaged_masterslave_oct30th2017";
+ qcom,checksum = <0xAAE2>;
qcom,gui-version = "PMI8998GUI - 2.0.0.58";
qcom,fg-profile-data = [
8F 1F 94 05
73 0A 4A 06
27 1D 21 EA
- 16 0A 3B 0C
+ 16 0A 3A 0C
07 18 97 22
A5 3C EC 4A
5C 00 00 00
10 00 00 00
- 00 00 92 BC
- CD BD 02 B4
+ 00 00 43 C5
+ 92 BC 89 BB
11 00 08 00
69 DA AD 07
4B FD 19 FA
- 1D 0C B0 0C
+ 7E 01 49 13
EB F3 78 3B
24 06 09 20
27 00 14 00
7E 1F F2 05
- 19 0A 55 FD
- 6C 1D C6 ED
+ 19 0A AB 06
+ 6C 1D B9 07
1A 12 FF 1D
6F 18 EB 22
B9 45 6F 52
55 00 00 00
0E 00 00 00
- 00 00 A1 D5
- 34 BA A0 CA
+ 00 00 33 CC
+ 72 CA B3 C4
0F 00 00 00
93 00 AD 07
8D FD F6 00
- BA 0D 5C 04
- B3 FC F4 1B
+ 6F E3 44 0B
+ AB FC F9 1B
C3 33 CC FF
07 10 00 00
A4 0D 99 45
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index a9ca87c..0a2b814 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -257,6 +257,229 @@
qcom,pipe-attr-ee;
};
+ thermal_zones: thermal-zones {
+ mdm-core-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ qdsp-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ camera-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc1_cpu0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc1_cpu1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc1_cpu2-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc1_cpu3-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc1_l2-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc0_cpu0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc0_cpu1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc0_cpu2-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 11>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc0_cpu3-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 12>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ apc0_l2-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 13>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 14>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 15>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ tsens0: tsens@4a8000 {
+ compatible = "qcom,msm8953-tsens";
+ reg = <0x4a8000 0x1000>,
+ <0x4a9000 0x1000>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 184 0>, <0 314 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
blsp1_uart0: serial@78af000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x78af000 0x200>;
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 60162e3..0ddb9f5 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -82,6 +82,12 @@
mpp@a100 {
reg = <0xa100 0x100>;
qcom,pin-num = <2>;
+ /* MPP2 - PA_THERM config */
+ qcom,mode = <4>; /* AIN input */
+ qcom,invert = <1>; /* Enable MPP */
+ qcom,ain-route = <1>; /* AMUX 6 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
};
mpp@a200 {
@@ -93,6 +99,12 @@
mpp@a300 {
reg = <0xa300 0x100>;
qcom,pin-num = <4>;
+ /* MPP4 - CASE_THERM config */
+ qcom,mode = <4>; /* AIN input */
+ qcom,invert = <1>; /* Enable MPP */
+ qcom,ain-route = <3>; /* AMUX 8 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
};
};
@@ -165,6 +177,28 @@
qcom,adc-vdd-reference = <1800>;
qcom,vadc-poll-eoc;
+ chan@5 {
+ label = "vcoin";
+ reg = <5>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@7 {
+ label = "vph_pwr";
+ reg = <7>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
chan@8 {
label = "die_temp";
reg = <8>;
@@ -208,6 +242,63 @@
qcom,hw-settle-time = <0>;
qcom,fast-avg-setup = <0>;
};
+
+ chan@36 {
+ label = "pa_therm0";
+ reg = <0x36>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@11 {
+ label = "pa_therm1";
+ reg = <0x11>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+
+ chan@32 {
+ label = "xo_therm";
+ reg = <0x32>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@3c {
+ label = "xo_therm_buf";
+ reg = <0x3c>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+ chan@13 {
+ label = "case_therm";
+ reg = <0x13>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
};
pm8953_adc_tm: vadc@3400 {
@@ -224,7 +315,6 @@
qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1800>;
qcom,adc_tm-vadc = <&pm8953_vadc>;
-
};
pm8953_rtc: qcom,pm8953_rtc {
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 007081a..2f4b00e 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -103,6 +103,7 @@
qcom,thermal-mitigation
= <3000000 1500000 1000000 500000>;
qcom,auto-recharge-soc;
+ qcom,suspend-input-on-debug-batt;
qcom,chgr@1000 {
reg = <0x1000 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index dd35a36..fe88aae 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -186,12 +186,4 @@
reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
reg-names = "phys_addr_base", "offset_addr";
};
-
- pdc: interrupt-controller@b220000{
- compatible = "qcom,pdc-sdm670";
- reg = <0xb220000 0x400>;
- #interrupt-cells = <3>;
- interrupt-parent = <&intc>;
- interrupt-controller;
- };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 0502312..8db4013 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -464,10 +464,22 @@
#size-cells = <2>;
ranges;
- removed_regions: removed_regions@85700000 {
+ hyp_region: hyp_region@85700000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x85700000 0 0x3800000>;
+ reg = <0 0x85700000 0 0x600000>;
+ };
+
+ xbl_region: xbl_region@85e00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x85e00000 0 0x100000>;
+ };
+
+ removed_region: removed_region@85fc0000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x85fc0000 0 0x2f40000>;
};
pil_camera_mem: camera_region@8ab00000 {
@@ -689,6 +701,14 @@
interrupt-parent = <&intc>;
};
+ pdc: interrupt-controller@b220000{
+ compatible = "qcom,pdc-sdm670";
+ reg = <0xb220000 0x400>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <1 1 0xf08>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 829dfcc..7d83184 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -33,6 +33,12 @@
qcom,ion-heap-type = "DMA";
};
+ qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+ reg = <19>;
+ memory-region = <&qseecom_ta_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
qcom,ion-heap@13 { /* SECURE SPSS HEAP */
reg = <13>;
memory-region = <&secure_sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 8f1afe9..b24ef1d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,12 +139,4 @@
reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
reg-names = "phys_addr_base", "offset_addr";
};
-
- pdc: interrupt-controller@0xb220000{
- compatible = "qcom,pdc-sdm845";
- reg = <0xb220000 0x400>;
- #interrupt-cells = <3>;
- interrupt-parent = <&intc>;
- interrupt-controller;
- };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index da4d41c..9672b94 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -296,7 +296,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm8998_l7: regulator-l7 {
regulator-name = "pm8998_l7";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -479,7 +479,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm8998_l17: regulator-l17 {
regulator-name = "pm8998_l17";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -587,7 +587,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm8998_l23: regulator-l23 {
regulator-name = "pm8998_l23";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -624,7 +624,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm8998_l25: regulator-l25 {
regulator-name = "pm8998_l25";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5b3178d..1ce9f1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -603,6 +603,14 @@
size = <0 0x1400000>;
};
+ qseecom_ta_mem: qseecom_ta_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1000000>;
+ };
+
secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
@@ -754,6 +762,14 @@
interrupt-parent = <&intc>;
};
+ pdc: interrupt-controller@b220000{
+ compatible = "qcom,pdc-sdm845";
+ reg = <0xb220000 0x400>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <1 1 0xf08>,
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index e225ede..0e6e6f8 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -415,7 +415,6 @@
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index b78b7a0..a8634aa 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -426,7 +426,6 @@
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 31e5b76..ba71ce8 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
- depends on OF && COMMON_CLK
+ depends on OF
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
@@ -39,6 +39,7 @@
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
+ depends on COMMON_CLK
default n
help
Compile in support for HDMI driver in msm drm
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5991cd5..89453b0 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -405,6 +405,7 @@
#define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0xF810
#define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00010
+#define A6XX_RBBM_GPR0_CNTL 0x00018
#define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f
#define A6XX_RBBM_INT_CLEAR_CMD 0x00037
#define A6XX_RBBM_INT_0_MASK 0x00038
@@ -809,7 +810,7 @@
/* GBIF registers */
#define A6XX_GBIF_HALT 0x3c45
#define A6XX_GBIF_HALT_ACK 0x3c46
-#define A6XX_GBIF_HALT_MASK 0x1
+#define A6XX_GBIF_HALT_MASK 0x2
#define A6XX_GBIF_PERF_PWR_CNT_EN 0x3cc0
#define A6XX_GBIF_PERF_CNT_SEL 0x3cc2
@@ -982,37 +983,8 @@
/* ISENSE registers */
#define A6XX_GMU_ISENSE_CTRL 0x1F95D
-#define A6XX_GPU_CS_ENABLE_REG 0x23120
#define A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x1f95d
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x22d78
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x22d58
-#define A6XX_GPU_CS_A_SENSOR_CTRL_0 0x22d80
-#define A6XX_GPU_CS_A_SENSOR_CTRL_2 0x422da
-#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x2301a
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x23157
-#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x2301a
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x2301d
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x2301f
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x23021
-#define A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x23165
-#define A6XX_GPU_CS_AMP_PERIOD_CTRL 0x2316d
-#define A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x23165
-
-#define CS_PWR_ON_STATUS (10)
-#define AMP_SW_WRM_TRIM_START (24)
-#define AMP_TRIM_TIMER (6)
-#define AMP_SW_TRIM_START (0)
-#define SS_AMPTRIM_DONE (11)
-#define AMP_OFFSET_CHECK_MIN_ERR (1)
-#define AMP_OFFSET_CHECK_MAX_ERR (2)
-#define AMP_OUT_OF_RANGE_ERR (4)
-#define TRIM_CNT_VALUE (1)
-#define RUNTIME_CNT_VALUE (16)
-#define TRIM_ENABLE (0)
-
-#define AMP_ERR (BIT(AMP_OFFSET_CHECK_MIN_ERR) || \
- BIT(AMP_OFFSET_CHECK_MAX_ERR) || \
- BIT(AMP_OUT_OF_RANGE_ERR))
+#define A6XX_GPU_CS_ENABLE_REG 0x23120
/* LM registers */
#define A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x1F94D
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 770cf3b..08cd06b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -347,7 +347,7 @@
.minor = 0,
.patchid = ANY_ID,
.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
- ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM |
+ ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
ADRENO_IOCOHERENT,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b77f6e1..0dd1921 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -226,6 +226,10 @@
#define ADRENO_HWCG_CTRL 3
#define ADRENO_THROTTLING_CTRL 4
+/* VBIF, GBIF halt request and ack mask */
+#define GBIF_HALT_REQUEST 0x1E0
+#define VBIF_RESET_ACK_MASK 0x00f0
+#define VBIF_RESET_ACK_TIMEOUT 100
/* number of throttle counters for DCVS adjustment */
#define ADRENO_GPMU_THROTTLE_COUNTERS 4
@@ -683,6 +687,8 @@
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+ ADRENO_REG_RBBM_GPR0_CNTL,
+ ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
ADRENO_REG_VBIF_XIN_HALT_CTRL0,
ADRENO_REG_VBIF_XIN_HALT_CTRL1,
ADRENO_REG_VBIF_VERSION,
@@ -1889,17 +1895,15 @@
* @ack_reg: register offset to wait for acknowledge
*/
static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
- int ack_reg)
+ int ack_reg, unsigned int mask)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned long wait_for_vbif;
- unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
unsigned int val;
int ret = 0;
/* wait for the transactions to clear */
- wait_for_vbif = jiffies + msecs_to_jiffies(100);
+ wait_for_vbif = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
while (1) {
adreno_readreg(adreno_dev, ack_reg,
&val);
@@ -1929,15 +1933,27 @@
int ret = 0;
if (adreno_has_gbif(adreno_dev)) {
+ /*
+ * Halt GBIF GX first and then CX part.
+ * Need to release CX Halt explicitly in case of SW_RESET.
+ * GX Halt release will be taken care by SW_RESET internally.
+ */
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
+ GBIF_HALT_REQUEST);
+ ret = adreno_wait_for_vbif_halt_ack(device,
+ ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+ VBIF_RESET_ACK_MASK);
+ if (ret)
+ return ret;
+
adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
ret = adreno_wait_for_vbif_halt_ack(device,
- ADRENO_REG_GBIF_HALT_ACK);
- adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0);
+ ADRENO_REG_GBIF_HALT_ACK, mask);
} else {
adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
mask);
ret = adreno_wait_for_vbif_halt_ack(device,
- ADRENO_REG_VBIF_XIN_HALT_CTRL1);
+ ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
}
return ret;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 83dd3fb..09d6a10 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -43,6 +43,8 @@
#define A6XX_GPU_CX_REG_BASE 0x509E000
#define A6XX_GPU_CX_REG_SIZE 0x1000
+#define GPU_LIMIT_THRESHOLD_ENABLE BIT(31)
+
static int _load_gmu_firmware(struct kgsl_device *device);
static const struct adreno_vbif_data a630_vbif[] = {
@@ -758,6 +760,38 @@
a6xx_preemption_start(adreno_dev);
a6xx_protect_init(adreno_dev);
+
+ /*
+ * We start LM here because we want all the following to be up
+ * 1. GX HS
+ * 2. SPTPRAC
+ * 3. HFI
+ * At this point, we are guaranteed all.
+ */
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+ test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
+ int result;
+ struct gmu_device *gmu = &device->gmu;
+ struct device *dev = &gmu->pdev->dev;
+
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
+ GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
+ kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
+
+ gmu->lm_config.lm_type = 1;
+ gmu->lm_config.lm_sensor_type = 1;
+ gmu->lm_config.throttle_config = 1;
+ gmu->lm_config.idle_throttle_en = 0;
+ gmu->lm_config.acd_en = 0;
+ gmu->bcl_config = 0;
+ gmu->lm_dcvs_level = 0;
+
+ result = hfi_send_lmconfig(gmu);
+ if (result)
+ dev_err(dev, "Failure enabling limits management (%d)\n",
+ result);
+ }
}
/*
@@ -1713,80 +1747,6 @@
return 0;
}
-#define KMASK(start, n) (GENMASK((start + n), (start)))
-
-static void isense_cold_trimm(struct kgsl_device *device)
-{
- unsigned int reg;
- struct gmu_device *gmu = &device->gmu;
-
- kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 0);
-
- kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3,
- 0x00000F8F);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2,
- 0x00705161);
- udelay(10);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0x3);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_0, 0x10040a);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_2, 0x10040a);
-
- kgsl_gmu_regread(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS, ®);
- if ((reg & BIT(CS_PWR_ON_STATUS)) != (1 << CS_PWR_ON_STATUS)) {
- dev_err(&gmu->pdev->dev, "ERROR - ISENSE power-up\n");
- return;
- }
-
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
- KMASK(AMP_TRIM_TIMER, 15), 70 << AMP_TRIM_TIMER);
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
- KMASK(AMP_SW_TRIM_START, 1), 0 << AMP_SW_TRIM_START);
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
- KMASK(AMP_SW_TRIM_START, 1), 1 << AMP_SW_TRIM_START);
-
- if (timed_poll_check(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS,
- BIT(SS_AMPTRIM_DONE), GMU_START_TIMEOUT,
- BIT(SS_AMPTRIM_DONE))) {
- dev_err(&gmu->pdev->dev, "ISENSE SS_AMPTRIM failure\n");
- return;
- }
-
- kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0, ®);
- if (reg & AMP_ERR) {
- kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0,
- ®);
- dev_err(&gmu->pdev->dev,
- "ISENSE ERROR:trimming GX 0x%08x\n", reg);
- return;
- }
-
- kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2, ®);
- if (reg & AMP_ERR) {
- kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2,
- ®);
- dev_err(&gmu->pdev->dev,
- "ISENSE ERROR:trimming SPTPRAC 0x%08x\n", reg);
- return;
- }
-
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
- KMASK(TRIM_CNT_VALUE, 13), 20 << TRIM_CNT_VALUE);
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
- KMASK(RUNTIME_CNT_VALUE, 9), 50 << RUNTIME_CNT_VALUE);
-
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
- KMASK(TRIM_ENABLE, 1), 1 << TRIM_ENABLE);
- udelay(4);
- kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
- KMASK(TRIM_ENABLE, 1), 0 << TRIM_ENABLE);
- kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
-
-}
-
-#define GPU_LIMIT_THRESHOLD_ENABLE BIT(31)
/*
* a6xx_gmu_fw_start() - set up GMU and start FW
* @device: Pointer to KGSL device
@@ -1867,13 +1827,6 @@
kgsl_gmu_regwrite(device, A6XX_GMU_HFI_SFR_ADDR, chipid);
- if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
- test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
- kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
- GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
- isense_cold_trimm(device);
- }
-
/* Configure power control and bring the GMU out of reset */
a6xx_gmu_power_config(device);
ret = a6xx_gmu_start(device);
@@ -2096,8 +2049,7 @@
return _load_gmu_firmware(device);
}
-#define VBIF_RESET_ACK_TIMEOUT 100
-#define VBIF_RESET_ACK_MASK 0x00f0
+#define GBIF_CX_HALT_MASK BIT(1)
static int a6xx_soft_reset(struct adreno_device *adreno_dev)
{
@@ -2138,6 +2090,13 @@
if (!vbif_acked)
return -ETIMEDOUT;
+ /*
+ * GBIF GX halt will be released automatically by sw_reset.
+ * Release GBIF CX halt after sw_reset
+ */
+ if (adreno_has_gbif(adreno_dev))
+ kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
+
a6xx_sptprac_enable(adreno_dev);
return 0;
@@ -2354,8 +2313,14 @@
udelay(100);
}
- if (acked)
- ret = adreno_soft_reset(device);
+ if (acked) {
+ /* Make sure VBIF/GBIF is cleared before resetting */
+ ret = adreno_vbif_clear_pending_transactions(device);
+
+ if (ret == 0)
+ ret = adreno_soft_reset(device);
+ }
+
if (ret)
KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
}
@@ -3692,6 +3657,9 @@
A6XX_VBIF_XIN_HALT_CTRL0),
ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
A6XX_VBIF_XIN_HALT_CTRL1),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+ A6XX_RBBM_VBIF_GX_RESET_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 3a5b489..daac9f1 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -611,24 +611,6 @@
if (result)
return result;
- if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
- test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
- gmu->lm_config.lm_type = 1;
- gmu->lm_config.lm_sensor_type = 1;
- gmu->lm_config.throttle_config = 1;
- gmu->lm_config.idle_throttle_en = 0;
- gmu->lm_config.acd_en = 0;
- gmu->bcl_config = 0;
- gmu->lm_dcvs_level = 0;
-
- result = hfi_send_lmconfig(gmu);
- if (result) {
- dev_err(dev, "Failure enabling LM (%d)\n",
- result);
- return result;
- }
- }
-
/* Tell the GMU we are sending no more HFIs until the next boot */
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
result = hfi_send_test(gmu);
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 105599c..b24509d 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -360,4 +360,5 @@
uint32_t bw_idx, enum rpm_ack_type ack_type);
int hfi_notify_slumber(struct gmu_device *gmu, uint32_t init_perf_idx,
uint32_t init_bw_idx);
+int hfi_send_lmconfig(struct gmu_device *gmu);
#endif /* __KGSL_HFI_H */
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 90920d9..d274490 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -741,6 +741,10 @@
&ipa3_usb_ctx->ttype_ctx[ttype];
int result;
+ /* create PM resources for the first tethering protocol only */
+ if (ipa3_usb_ctx->num_init_prot > 0)
+ return 0;
+
memset(&ttype_ctx->pm_ctx.reg_params, 0,
sizeof(ttype_ctx->pm_ctx.reg_params));
ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 716ec79..57b988b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4651,7 +4651,7 @@
}
if (count > 0)
- dbg_buff[count - 1] = '\0';
+ dbg_buff[count] = '\0';
IPADBG("user input string %s\n", dbg_buff);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 3bf0327..be342cb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1030,8 +1030,9 @@
client->state);
spin_unlock_irqrestore(&client->state_lock, flags);
} else if (client->state ==
- IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
- IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+ IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
+ client->state ==
+ IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
run_algorithm = true;
client->state = IPA_PM_DEACTIVATED;
IPA_PM_DBG_STATE(client->hdl, client->name,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6a5e85b..7421eb8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1583,7 +1583,6 @@
clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
- clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
break;
case IPA_RM_RESOURCE_MHI_CONS:
clients->names[i++] = IPA_CLIENT_MHI_CONS;
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 98861de..195799e 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -612,7 +612,8 @@
p_events[ev_ring_idx].rp =
(u32)event_ring_bufs[ev_ring_idx].phys_base;
p_events[ev_ring_idx].wp =
- (u32)event_ring_bufs[ev_ring_idx].phys_base;
+ (u32)event_ring_bufs[ev_ring_idx].phys_base +
+ event_ring_bufs[ev_ring_idx].size - 16;
} else {
IPA_UT_LOG("Skip configuring event ring - already done\n");
}
@@ -3261,11 +3262,11 @@
IPA_UT_ADD_TEST(suspend_resume_with_open_aggr,
"several suspend/resume iterations with open aggregation frame",
ipa_mhi_test_in_loop_suspend_resume_aggr_open,
- true, IPA_HW_v3_0, IPA_HW_MAX),
+ true, IPA_HW_v3_0, IPA_HW_v3_5_1),
IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr,
"several force suspend/resume iterations with open aggregation frame",
ipa_mhi_test_in_loop_force_suspend_resume_aggr_open,
- true, IPA_HW_v3_0, IPA_HW_MAX),
+ true, IPA_HW_v3_0, IPA_HW_v3_5_1),
IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup,
"several suspend and host wakeup resume iterations",
ipa_mhi_test_in_loop_suspend_host_wakeup,
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 93121df..95e3782 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -321,10 +321,18 @@
struct usb_bam_ctx_type *ctx = &msm_usb_bam[pipe_connect->bam_type];
struct sps_mem_buffer *data_buf = &(pipe_connect->data_mem_buf);
struct sps_mem_buffer *desc_buf = &(pipe_connect->desc_mem_buf);
+ struct device *dev = &ctx->usb_bam_pdev->dev;
+ struct sg_table data_sgt, desc_sgt;
+ dma_addr_t data_iova, desc_iova;
+ u32 data_fifo_size;
pr_debug("%s: data_fifo size:%x desc_fifo_size:%x\n",
__func__, pipe_connect->data_fifo_size,
pipe_connect->desc_fifo_size);
+
+ if (dev->parent)
+ dev = dev->parent;
+
switch (pipe_connect->mem_type) {
case SPS_PIPE_MEM:
log_event_dbg("%s: USB BAM using SPS pipe memory\n", __func__);
@@ -366,7 +374,16 @@
ret = -ENOMEM;
goto err_exit;
}
+
memset_io(data_buf->base, 0, data_buf->size);
+ data_buf->iova = dma_map_resource(dev, data_buf->phys_base,
+ data_buf->size, DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, data_buf->iova))
+ log_event_err("%s(): oci_mem: err mapping data_buf\n",
+ __func__);
+ log_event_dbg("%s: data_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+ __func__, dev_name(dev), data_buf->base,
+ (unsigned long)data_buf->phys_base, data_buf->iova);
desc_buf->phys_base = pipe_connect->desc_fifo_base_offset +
ctx->usb_bam_data->usb_bam_fifo_baseaddr;
@@ -380,6 +397,16 @@
goto err_exit;
}
memset_io(desc_buf->base, 0, desc_buf->size);
+ desc_buf->iova = dma_map_resource(dev, desc_buf->phys_base,
+ desc_buf->size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, desc_buf->iova))
+ log_event_err("%s(): oci_mem: err mapping desc_buf\n",
+ __func__);
+
+ log_event_dbg("%s: desc_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+ __func__, dev_name(dev), desc_buf->base,
+ (unsigned long)desc_buf->phys_base, desc_buf->iova);
break;
case SYSTEM_MEM:
log_event_dbg("%s: USB BAM using system memory\n", __func__);
@@ -391,56 +418,57 @@
}
/* BAM would use system memory, allocate FIFOs */
- data_buf->size = pipe_connect->data_fifo_size;
+ data_fifo_size = data_buf->size = pipe_connect->data_fifo_size;
/* On platforms which use CI controller, USB HW can fetch
* additional 128 bytes at the end of circular buffer when
* AXI prefetch is enabled and hence requirement is to
* allocate 512 bytes more than required length.
*/
if (pipe_connect->bam_type == CI_CTRL)
- data_buf->base =
- dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
- (pipe_connect->data_fifo_size +
- DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
- &(data_buf->phys_base),
- GFP_KERNEL);
- else
- data_buf->base =
- dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
- pipe_connect->data_fifo_size,
- &(data_buf->phys_base),
- GFP_KERNEL);
+ data_fifo_size += DATA_FIFO_EXTRA_MEM_ALLOC_SIZE;
+
+ data_buf->base = dma_alloc_attrs(dev, data_fifo_size,
+ &data_iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!data_buf->base) {
- log_event_err("%s: dma_alloc_coherent failed for data fifo\n",
+ log_event_err("%s: data_fifo: dma_alloc_attr failed\n",
__func__);
ret = -ENOMEM;
goto err_exit;
}
memset(data_buf->base, 0, pipe_connect->data_fifo_size);
+ data_buf->iova = data_iova;
+ dma_get_sgtable(dev, &data_sgt, data_buf->base, data_buf->iova,
+ data_fifo_size);
+ data_buf->phys_base = page_to_phys(sg_page(data_sgt.sgl));
+ sg_free_table(&data_sgt);
+ log_event_dbg("%s: data_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+ __func__, dev_name(dev), data_buf->base,
+ (unsigned long)data_buf->phys_base, data_buf->iova);
+
desc_buf->size = pipe_connect->desc_fifo_size;
- desc_buf->base = dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
- pipe_connect->desc_fifo_size,
- &(desc_buf->phys_base),
- GFP_KERNEL);
+ desc_buf->base = dma_alloc_attrs(dev,
+ pipe_connect->desc_fifo_size,
+ &desc_iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!desc_buf->base) {
- log_event_err("%s: dma_alloc_coherent failed for desc fifo\n",
+ log_event_err("%s: desc_fifo: dma_alloc_attr failed\n",
__func__);
- if (pipe_connect->bam_type == CI_CTRL)
- dma_free_coherent(&ctx->usb_bam_pdev->dev,
- (pipe_connect->data_fifo_size +
- DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
- data_buf->base,
- data_buf->phys_base);
- else
- dma_free_coherent(&ctx->usb_bam_pdev->dev,
- pipe_connect->data_fifo_size,
- data_buf->base,
- data_buf->phys_base);
+ dma_free_attrs(dev, data_fifo_size, data_buf->base,
+ data_buf->iova, DMA_ATTR_FORCE_CONTIGUOUS);
ret = -ENOMEM;
goto err_exit;
}
memset(desc_buf->base, 0, pipe_connect->desc_fifo_size);
+ desc_buf->iova = desc_iova;
+ dma_get_sgtable(dev, &desc_sgt, desc_buf->base, desc_buf->iova,
+ desc_buf->size);
+ desc_buf->phys_base = page_to_phys(sg_page(desc_sgt.sgl));
+ sg_free_table(&desc_sgt);
+ log_event_dbg("%s: desc_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+ __func__, dev_name(dev), desc_buf->base,
+ (unsigned long)desc_buf->phys_base, desc_buf->iova);
break;
default:
log_event_err("%s: invalid mem type\n", __func__);
@@ -476,35 +504,40 @@
&ctx->usb_bam_connections[idx];
struct sps_connect *sps_connection =
&ctx->usb_bam_sps.sps_connections[idx];
+ struct device *dev = &ctx->usb_bam_pdev->dev;
+ u32 data_fifo_size;
pr_debug("%s(): data size:%x desc size:%x\n",
__func__, sps_connection->data.size,
sps_connection->desc.size);
+ if (dev->parent)
+ dev = dev->parent;
+
switch (pipe_connect->mem_type) {
case SYSTEM_MEM:
log_event_dbg("%s: Freeing system memory used by PIPE\n",
__func__);
- if (sps_connection->data.phys_base) {
+ if (sps_connection->data.iova) {
+ data_fifo_size = sps_connection->data.size;
if (cur_bam == CI_CTRL)
- dma_free_coherent(&ctx->usb_bam_pdev->dev,
- (sps_connection->data.size +
- DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
+ data_fifo_size +=
+ DATA_FIFO_EXTRA_MEM_ALLOC_SIZE;
+
+ dma_free_attrs(dev, data_fifo_size,
sps_connection->data.base,
- sps_connection->data.phys_base);
- else
- dma_free_coherent(&ctx->usb_bam_pdev->dev,
- sps_connection->data.size,
- sps_connection->data.base,
- sps_connection->data.phys_base);
+ sps_connection->data.iova,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ sps_connection->data.iova = 0;
sps_connection->data.phys_base = 0;
pipe_connect->data_mem_buf.base = NULL;
}
- if (sps_connection->desc.phys_base) {
- dma_free_coherent(&ctx->usb_bam_pdev->dev,
- sps_connection->desc.size,
+ if (sps_connection->desc.iova) {
+ dma_free_attrs(dev, sps_connection->desc.size,
sps_connection->desc.base,
- sps_connection->desc.phys_base);
+ sps_connection->desc.iova,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ sps_connection->desc.iova = 0;
sps_connection->desc.phys_base = 0;
pipe_connect->desc_mem_buf.base = NULL;
}
@@ -512,11 +545,25 @@
case OCI_MEM:
log_event_dbg("Freeing oci memory used by BAM PIPE\n");
if (sps_connection->data.base) {
+ if (sps_connection->data.iova) {
+ dma_unmap_resource(dev,
+ sps_connection->data.iova,
+ sps_connection->data.size,
+ DMA_BIDIRECTIONAL, 0);
+ sps_connection->data.iova = 0;
+ }
iounmap(sps_connection->data.base);
sps_connection->data.base = NULL;
pipe_connect->data_mem_buf.base = NULL;
}
if (sps_connection->desc.base) {
+ if (sps_connection->desc.iova) {
+ dma_unmap_resource(dev,
+ sps_connection->desc.iova,
+ sps_connection->desc.size,
+ DMA_BIDIRECTIONAL, 0);
+ sps_connection->desc.iova = 0;
+ }
iounmap(sps_connection->desc.base);
sps_connection->desc.base = NULL;
pipe_connect->desc_mem_buf.base = NULL;
@@ -530,7 +577,8 @@
return 0;
}
-static int connect_pipe(enum usb_ctrl cur_bam, u8 idx, u32 *usb_pipe_idx)
+static int connect_pipe(enum usb_ctrl cur_bam, u8 idx, u32 *usb_pipe_idx,
+ unsigned long iova)
{
int ret;
struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
@@ -575,9 +623,11 @@
if (dir == USB_TO_PEER_PERIPHERAL) {
sps_connection->mode = SPS_MODE_SRC;
*usb_pipe_idx = pipe_connect->src_pipe_index;
+ sps_connection->dest_iova = iova;
} else {
sps_connection->mode = SPS_MODE_DEST;
*usb_pipe_idx = pipe_connect->dst_pipe_index;
+ sps_connection->source_iova = iova;
}
sps_connection->data = *data_buf;
@@ -1059,7 +1109,34 @@
return 0;
}
-int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
+int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+ phys_addr_t *p_addr, u32 *bam_size)
+{
+ int ret = 0;
+ struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
+ struct usb_bam_pipe_connect *pipe_connect =
+ &ctx->usb_bam_connections[idx];
+ unsigned long peer_bam_handle;
+
+ ret = sps_phy2h(pipe_connect->dst_phy_addr, &peer_bam_handle);
+ if (ret) {
+ log_event_err("%s: sps_phy2h failed (src BAM) %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = sps_get_bam_addr(peer_bam_handle, p_addr, bam_size);
+ if (ret) {
+ log_event_err("%s: sps_get_bam_addr failed%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx,
+ unsigned long iova)
{
int ret;
struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
@@ -1110,7 +1187,7 @@
/* Set the BAM mode (host/device) according to connected pipe */
info[cur_bam].cur_bam_mode = pipe_connect->bam_mode;
- ret = connect_pipe(cur_bam, idx, bam_pipe_idx);
+ ret = connect_pipe(cur_bam, idx, bam_pipe_idx, iova);
if (ret) {
log_event_err("%s: pipe connection[%d] failure\n",
__func__, idx);
@@ -3024,6 +3101,7 @@
struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
struct sps_bam_props props;
+ struct device *dev;
memset(&props, 0, sizeof(props));
@@ -3059,8 +3137,16 @@
pr_debug("Register and enable HSUSB BAM\n");
props.options |= SPS_BAM_OPT_ENABLE_AT_BOOT;
}
- ret = sps_register_bam_device(&props, &ctx->h_bam);
+ dev = &ctx->usb_bam_pdev->dev;
+ if (dev && dev->parent && !device_property_present(dev->parent,
+ "qcom,smmu-s1-bypass")) {
+ pr_info("%s: setting SPS_BAM_SMMU_EN flag with (%s)\n",
+ __func__, dev_name(dev));
+ props.options |= SPS_BAM_SMMU_EN;
+ }
+
+ ret = sps_register_bam_device(&props, &ctx->h_bam);
if (ret < 0) {
log_event_err("%s: register bam error %d\n", __func__, ret);
return -EFAULT;
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index bfc401a..5b31889 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -301,12 +301,29 @@
__raw_writel(0x7766550a, restart_reason);
} else if (!strncmp(cmd, "oem-", 4)) {
unsigned long code;
+ unsigned long reset_reason;
int ret;
ret = kstrtoul(cmd + 4, 16, &code);
- if (!ret)
+ if (!ret) {
+ /* Bit-2 to bit-7 of SOFT_RB_SPARE for hard
+ * reset reason:
+ * Value 0 to 31 for common defined features
+ * Value 32 to 63 for oem specific features
+ */
+ reset_reason = code +
+ PON_RESTART_REASON_OEM_MIN;
+ if (reset_reason > PON_RESTART_REASON_OEM_MAX ||
+ reset_reason < PON_RESTART_REASON_OEM_MIN) {
+ pr_err("Invalid oem reset reason: %lx\n",
+ reset_reason);
+ } else {
+ qpnp_pon_set_restart_reason(
+ reset_reason);
+ }
__raw_writel(0x6f656d00 | (code & 0xff),
restart_reason);
+ }
} else if (!strncmp(cmd, "edl", 3)) {
enable_emergency_dload_mode();
} else {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a6bc1da..8060142 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6146,7 +6146,7 @@
out:
ufshcd_scsi_unblock_requests(hba);
- pm_runtime_put_sync(hba->dev);
+ pm_runtime_put(hba->dev);
return;
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 720ac31..c1103c7 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1523,6 +1523,11 @@
}
buffer = dmabuf->priv;
+ if (!is_buffer_hlos_assigned(buffer)) {
+ pr_err("%s: cannot sync a secure dmabuf\n", __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
buffer->sg_table->nents, DMA_BIDIRECTIONAL);
dma_buf_put(dmabuf);
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index c7b58ce..9d53391 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -88,6 +88,10 @@
.name = ION_QSECOM_HEAP_NAME,
},
{
+ .id = ION_QSECOM_TA_HEAP_ID,
+ .name = ION_QSECOM_TA_HEAP_NAME,
+ },
+ {
.id = ION_SPSS_HEAP_ID,
.name = ION_SPSS_HEAP_NAME,
},
@@ -340,7 +344,7 @@
if (!ION_IS_CACHED(flags))
return 0;
- if (flags & ION_FLAG_SECURE)
+ if (!is_buffer_hlos_assigned(ion_handle_buffer(handle)))
return 0;
table = ion_sg_table(client, handle);
@@ -675,6 +679,20 @@
return -EINVAL;
}
+bool is_buffer_hlos_assigned(struct ion_buffer *buffer)
+{
+ bool is_hlos = false;
+
+ if (buffer->heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA &&
+ (buffer->flags & ION_FLAG_CP_HLOS))
+ is_hlos = true;
+
+ if (get_secure_vmid(buffer->flags) <= 0)
+ is_hlos = true;
+
+ return is_hlos;
+}
+
int get_vmid(unsigned long flags)
{
int vmid;
@@ -751,9 +769,9 @@
down_read(&mm->mmap_sem);
- start = (unsigned long)data.flush_data.vaddr;
- end = (unsigned long)data.flush_data.vaddr
- + data.flush_data.length;
+ start = (unsigned long)data.flush_data.vaddr +
+ data.flush_data.offset;
+ end = start + data.flush_data.length;
if (check_vaddr_bounds(start, end)) {
pr_err("%s: virtual address %pK is out of bounds\n",
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
index 741d017..ad7b1c5 100644
--- a/drivers/staging/android/ion/msm/msm_ion.h
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -174,6 +174,8 @@
void *vaddr, unsigned int offset, unsigned long len,
unsigned int cmd);
+bool is_buffer_hlos_assigned(struct ion_buffer *buffer);
+
#else
static inline struct ion_client *msm_ion_client_create(const char *name)
{
@@ -202,6 +204,10 @@
return -ENODEV;
}
+static bool is_buffer_hlos_assigned(struct ion_buffer *buffer)
+{
+ return true;
+}
#endif /* CONFIG_ION */
#endif
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 8d67f76..4747949 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -221,6 +221,22 @@
return 0;
}
+static int test_task_state(struct task_struct *p, int state)
+{
+ struct task_struct *t;
+
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (t->state & state) {
+ task_unlock(t);
+ return 1;
+ }
+ task_unlock(t);
+ }
+
+ return 0;
+}
+
static int test_task_lmk_waiting(struct task_struct *p)
{
struct task_struct *t;
@@ -435,7 +451,7 @@
int other_free;
int other_file;
- if (mutex_lock_interruptible(&scan_mutex) < 0)
+ if (!mutex_trylock(&scan_mutex))
return 0;
other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
@@ -495,8 +511,6 @@
if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
if (test_task_lmk_waiting(tsk)) {
rcu_read_unlock();
- /* give the system time to free up the memory */
- msleep_interruptible(20);
mutex_unlock(&scan_mutex);
return 0;
}
@@ -533,6 +547,16 @@
long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
long free = other_free * (long)(PAGE_SIZE / 1024);
+ if (test_task_lmk_waiting(selected) &&
+ (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
+ lowmem_print(2, "'%s' (%d) is already killed\n",
+ selected->comm,
+ selected->pid);
+ rcu_read_unlock();
+ mutex_unlock(&scan_mutex);
+ return 0;
+ }
+
task_lock(selected);
send_sig(SIGKILL, selected, 0);
if (selected->mm)
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 84598db..4f9dd73 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -33,6 +33,7 @@
ION_CP_MFC_HEAP_ID = 12,
ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+ ION_QSECOM_TA_HEAP_ID = 19,
ION_CAMERA_HEAP_ID = 20, /* 8660 only */
ION_SYSTEM_CONTIG_HEAP_ID = 21,
ION_ADSP_HEAP_ID = 22,
@@ -130,6 +131,7 @@
#define ION_PIL1_HEAP_NAME "pil_1"
#define ION_PIL2_HEAP_NAME "pil_2"
#define ION_QSECOM_HEAP_NAME "qsecom"
+#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
#define ION_SECURE_HEAP_NAME "secure_heap"
#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index acbd26b..27bf54b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -58,4 +58,4 @@
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
obj-$(CONFIG_THERMAL_QPNP_ADC_TM) += qpnp-adc-tm.o
-obj-$(CONFIG_THERMAL_TSENS) += msm-tsens.o tsens2xxx.o tsens-dbg.o
+obj-$(CONFIG_THERMAL_TSENS) += msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index 2e795b1..e1fc6b9 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -12,7 +12,9 @@
*/
#include <asm/arch_timer.h>
+#include <linux/platform_device.h>
#include "tsens.h"
+#include "tsens-mtc.h"
/* debug defines */
#define TSENS_DBG_BUS_ID_0 0
@@ -42,6 +44,177 @@
int (*dbg_func)(struct tsens_device *, u32, u32, int *);
};
+static ssize_t
+zonemask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ return snprintf(buf, PAGE_SIZE,
+ "Zone =%d th1=%d th2=%d\n", tmdev->mtcsys.zone_mtc,
+ tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+}
+
+static ssize_t
+zonemask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = sscanf(buf, "%d %d %d", &tmdev->mtcsys.zone_mtc,
+ &tmdev->mtcsys.th1, &tmdev->mtcsys.th2);
+
+ if (ret != TSENS_ZONEMASK_PARAMS) {
+ pr_err("Invalid command line arguments\n");
+ count = -EINVAL;
+ } else {
+ pr_debug("store zone_mtc=%d th1=%d th2=%d\n",
+ tmdev->mtcsys.zone_mtc,
+ tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+ ret = tsens_set_mtc_zone_sw_mask(tmdev->mtcsys.zone_mtc,
+ tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+ if (ret < 0) {
+ pr_err("Invalid command line arguments\n");
+ count = -EINVAL;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t
+zonelog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int ret, zlog[TSENS_MTC_ZONE_LOG_SIZE];
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = tsens_get_mtc_zone_log(tmdev->mtcsys.zone_log, zlog);
+ if (ret < 0) {
+ pr_err("Invalid command line arguments\n");
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE,
+ "Log[0]=%d\nLog[1]=%d\nLog[2]=%d\nLog[3]=%d\nLog[4]=%d\nLog[5]=%d\n",
+ zlog[0], zlog[1], zlog[2], zlog[3], zlog[4], zlog[5]);
+}
+
+static ssize_t
+zonelog_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_log);
+ if (ret < 0) {
+ pr_err("Invalid command line arguments\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+zonehist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int ret, zhist[TSENS_MTC_ZONE_HISTORY_SIZE];
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = tsens_get_mtc_zone_history(tmdev->mtcsys.zone_hist, zhist);
+ if (ret < 0) {
+ pr_err("Invalid command line arguments\n");
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE,
+ "Cool = %d\nYellow = %d\nRed = %d\n",
+ zhist[0], zhist[1], zhist[2]);
+}
+
+static ssize_t
+zonehist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct tsens_device *tmdev = NULL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_hist);
+ if (ret < 0) {
+ pr_err("Invalid command line arguments\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct device_attribute tsens_mtc_dev_attr[] = {
+ __ATTR(zonemask, 0644, zonemask_show, zonemask_store),
+ __ATTR(zonelog, 0644, zonelog_show, zonelog_store),
+ __ATTR(zonehist, 0644, zonehist_show, zonehist_store),
+};
+
+static int tsens_dbg_mtc_data(struct tsens_device *data,
+ u32 id, u32 dbg_type, int *val)
+{
+ int result = 0, i;
+ struct tsens_device *tmdev = NULL;
+ struct device_attribute *attr_ptr = NULL;
+
+ attr_ptr = tsens_mtc_dev_attr;
+ tmdev = data;
+
+ for (i = 0; i < ARRAY_SIZE(tsens_mtc_dev_attr); i++) {
+ result = device_create_file(&tmdev->pdev->dev, &attr_ptr[i]);
+ if (result < 0)
+ goto error;
+ }
+
+ return result;
+
+error:
+ for (i--; i >= 0; i--)
+ device_remove_file(&tmdev->pdev->dev, &attr_ptr[i]);
+
+ return result;
+}
+
static int tsens_dbg_log_temp_reads(struct tsens_device *data, u32 id,
u32 dbg_type, int *temp)
{
@@ -206,6 +379,7 @@
[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
tsens_dbg_log_interrupt_timestamp},
[TSENS_DBG_LOG_BUS_ID_DATA] = {tsens_dbg_log_bus_id_data},
+ [TSENS_DBG_MTC_DATA] = {tsens_dbg_mtc_data},
};
int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
diff --git a/drivers/thermal/tsens-mtc.c b/drivers/thermal/tsens-mtc.c
new file mode 100644
index 0000000..529503f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.c
@@ -0,0 +1,195 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "tsens.h"
+#include "tsens-mtc.h"
+
+struct tsens_device *tsens_controller_is_present(void)
+{
+ struct tsens_device *tmdev_chip = NULL;
+
+ if (list_empty(&tsens_device_list)) {
+ pr_err("%s: TSENS controller not available\n", __func__);
+ return tmdev_chip;
+ }
+
+ list_for_each_entry(tmdev_chip, &tsens_device_list, list)
+ return tmdev_chip;
+
+ return tmdev_chip;
+}
+EXPORT_SYMBOL(tsens_controller_is_present);
+
+static int tsens_mtc_reset_history_counter(unsigned int zone)
+{
+ unsigned int reg_cntl, is_valid;
+ void __iomem *sensor_addr;
+ struct tsens_device *tmdev = NULL;
+
+ if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+ return -EINVAL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(tmdev->tsens_tm_addr);
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ is_valid = (reg_cntl & TSENS_RESET_HISTORY_MASK)
+ >> TSENS_RESET_HISTORY_SHIFT;
+ if (!is_valid) {
+ /*Enable the bit to reset counter*/
+ writel_relaxed(reg_cntl | (1 << TSENS_RESET_HISTORY_SHIFT),
+ (sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ pr_debug("tsens : zone =%d reg=%x\n", zone, reg_cntl);
+ }
+
+ /*Disable the bit to start counter*/
+ writel_relaxed(reg_cntl & ~(1 << TSENS_RESET_HISTORY_SHIFT),
+ (sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ pr_debug("tsens : zone =%d reg=%x\n", zone, reg_cntl);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_mtc_reset_history_counter);
+
+int tsens_set_mtc_zone_sw_mask(unsigned int zone, unsigned int th1_enable,
+ unsigned int th2_enable)
+{
+ unsigned int reg_cntl;
+ void __iomem *sensor_addr;
+ struct tsens_device *tmdev = NULL;
+
+ if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+ return -EINVAL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR
+ (tmdev->tsens_tm_addr);
+
+ if (th1_enable && th2_enable)
+ writel_relaxed(TSENS_MTC_IN_EFFECT,
+ (sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ if (!th1_enable && !th2_enable)
+ writel_relaxed(TSENS_MTC_DISABLE,
+ (sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ if (th1_enable && !th2_enable)
+ writel_relaxed(TSENS_TH1_MTC_IN_EFFECT,
+ (sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ if (!th1_enable && th2_enable)
+ writel_relaxed(TSENS_TH2_MTC_IN_EFFECT,
+ (sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ pr_debug("tsens : zone =%d th1=%d th2=%d reg=%x\n",
+ zone, th1_enable, th2_enable, reg_cntl);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_set_mtc_zone_sw_mask);
+
+int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log)
+{
+ unsigned int i, reg_cntl, is_valid, log[TSENS_MTC_ZONE_LOG_SIZE];
+ int *zlog = (int *)zone_log;
+ void __iomem *sensor_addr;
+ struct tsens_device *tmdev = NULL;
+
+ if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+ return -EINVAL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ sensor_addr = TSENS_TM_MTC_ZONE0_LOG(tmdev->tsens_tm_addr);
+
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+ is_valid = (reg_cntl & TSENS_LOGS_VALID_MASK)
+ >> TSENS_LOGS_VALID_SHIFT;
+ if (is_valid) {
+ log[0] = (reg_cntl & TSENS_LOGS_LATEST_MASK);
+ log[1] = (reg_cntl & TSENS_LOGS_LOG1_MASK)
+ >> TSENS_LOGS_LOG1_SHIFT;
+ log[2] = (reg_cntl & TSENS_LOGS_LOG2_MASK)
+ >> TSENS_LOGS_LOG2_SHIFT;
+ log[3] = (reg_cntl & TSENS_LOGS_LOG3_MASK)
+ >> TSENS_LOGS_LOG3_SHIFT;
+ log[4] = (reg_cntl & TSENS_LOGS_LOG4_MASK)
+ >> TSENS_LOGS_LOG4_SHIFT;
+ log[5] = (reg_cntl & TSENS_LOGS_LOG5_MASK)
+ >> TSENS_LOGS_LOG5_SHIFT;
+ for (i = 0; i < (TSENS_MTC_ZONE_LOG_SIZE); i++) {
+ *(zlog+i) = log[i];
+ pr_debug("Log[%d]=%d\n", i, log[i]);
+ }
+ } else {
+ pr_debug("tsens: Valid bit disabled\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_log);
+
+int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist)
+{
+ unsigned int i, reg_cntl, hist[TSENS_MTC_ZONE_HISTORY_SIZE];
+ int *zhist = (int *)zone_hist;
+ void __iomem *sensor_addr;
+ struct tsens_device *tmdev = NULL;
+
+ if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+ return -EINVAL;
+
+ tmdev = tsens_controller_is_present();
+ if (!tmdev) {
+ pr_err("No TSENS controller present\n");
+ return -EPROBE_DEFER;
+ }
+
+ sensor_addr = TSENS_TM_MTC_ZONE0_HISTORY(tmdev->tsens_tm_addr);
+ reg_cntl = readl_relaxed((sensor_addr +
+ (zone * TSENS_SN_ADDR_OFFSET)));
+
+ hist[0] = (reg_cntl & TSENS_PS_COOL_CMD_MASK);
+ hist[1] = (reg_cntl & TSENS_PS_YELLOW_CMD_MASK)
+ >> TSENS_PS_YELLOW_CMD_SHIFT;
+ hist[2] = (reg_cntl & TSENS_PS_RED_CMD_MASK)
+ >> TSENS_PS_RED_CMD_SHIFT;
+ for (i = 0; i < (TSENS_MTC_ZONE_HISTORY_SIZE); i++) {
+ *(zhist+i) = hist[i];
+ pr_debug("tsens : %d\n", hist[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_history);
diff --git a/drivers/thermal/tsens-mtc.h b/drivers/thermal/tsens-mtc.h
new file mode 100644
index 0000000..979513f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOM_TSENS_MTC_H__
+#define __QCOM_TSENS_MTC_H__
+
+#define TSENS_NUM_MTC_ZONES_SUPPORT 3
+#define TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(n)	((n) + 0x140)
+#define TSENS_TM_MTC_ZONE0_LOG(n)	((n) + 0x150)
+#define TSENS_TM_MTC_ZONE0_HISTORY(n)	((n) + 0x160)
+#define TSENS_SN_ADDR_OFFSET		0x4
+#define TSENS_RESET_HISTORY_MASK	0x4
+#define TSENS_ZONEMASK_PARAMS		3
+#define TSENS_MTC_ZONE_LOG_SIZE		6
+#define TSENS_MTC_ZONE_HISTORY_SIZE	3
+
+#define TSENS_TH1_MTC_IN_EFFECT		BIT(0)
+#define TSENS_TH2_MTC_IN_EFFECT		BIT(1)
+#define TSENS_MTC_IN_EFFECT		0x3
+#define TSENS_MTC_DISABLE		0x0
+
+#define TSENS_LOGS_VALID_MASK		0x40000000
+#define TSENS_LOGS_VALID_SHIFT		30
+#define TSENS_LOGS_LATEST_MASK		0x0000001f
+#define TSENS_LOGS_LOG1_MASK		0x000003e0
+#define TSENS_LOGS_LOG2_MASK		0x00007c00
+#define TSENS_LOGS_LOG3_MASK		0x000f8000
+#define TSENS_LOGS_LOG4_MASK		0x01f00000
+#define TSENS_LOGS_LOG5_MASK		0x3e000000
+#define TSENS_LOGS_LOG1_SHIFT		5
+#define TSENS_LOGS_LOG2_SHIFT		10
+#define TSENS_LOGS_LOG3_SHIFT		15
+#define TSENS_LOGS_LOG4_SHIFT		20
+#define TSENS_LOGS_LOG5_SHIFT		25
+
+#define TSENS_PS_RED_CMD_MASK		0x3ff00000
+#define TSENS_PS_YELLOW_CMD_MASK	0x000ffc00
+#define TSENS_PS_COOL_CMD_MASK		0x000003ff
+#define TSENS_PS_YELLOW_CMD_SHIFT	0xa
+#define TSENS_PS_RED_CMD_SHIFT		0x14
+
+#define TSENS_RESET_HISTORY_SHIFT	2
+
+extern int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist);
+extern struct tsens_device *tsens_controller_is_present(void);
+extern int tsens_set_mtc_zone_sw_mask(unsigned int zone,
+			unsigned int th1_enable, unsigned int th2_enable);
+extern int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log);
+
+#endif /* __QCOM_TSENS_MTC_H__ */
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index ec2d592..ae4741d 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -32,6 +32,7 @@
TSENS_DBG_LOG_TEMP_READS,
TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
TSENS_DBG_LOG_BUS_ID_DATA,
+ TSENS_DBG_MTC_DATA,
TSENS_DBG_LOG_MAX
};
@@ -114,6 +115,15 @@
u32 cycle_compltn_monitor_mask;
bool wd_bark;
u32 wd_bark_mask;
+ bool mtc;
+};
+
+struct tsens_mtc_sysfs {
+ uint32_t zone_log;
+ int zone_mtc;
+ int th1;
+ int th2;
+ uint32_t zone_hist;
};
struct tsens_device {
@@ -130,8 +140,10 @@
spinlock_t tsens_upp_low_lock;
const struct tsens_data *ctrl_data;
struct tsens_sensor sensor[0];
+ struct tsens_mtc_sysfs mtcsys;
};
extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern struct list_head tsens_device_list;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index fd625ae..50c847f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -570,6 +570,11 @@
spin_lock_init(&tmdev->tsens_crit_lock);
spin_lock_init(&tmdev->tsens_upp_low_lock);
+ if (tmdev->ctrl_data->mtc) {
+ if (tmdev->ops->dbg)
+ tmdev->ops->dbg(tmdev, 0, TSENS_DBG_MTC_DATA, NULL);
+ }
+
return 0;
}
@@ -628,6 +633,7 @@
.wd_bark = false,
.wd_bark_mask = 1,
.ops = &ops_tsens2xxx,
+ .mtc = true,
};
const struct tsens_data data_tsens23xx = {
@@ -636,6 +642,7 @@
.wd_bark = true,
.wd_bark_mask = 1,
.ops = &ops_tsens2xxx,
+ .mtc = false,
};
const struct tsens_data data_tsens24xx = {
@@ -645,4 +652,5 @@
/* Enable Watchdog monitoring by unmasking */
.wd_bark_mask = 0,
.ops = &ops_tsens2xxx,
+ .mtc = false,
};
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 67a71ba..0ce23c3 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1286,6 +1286,8 @@
geni_se_rx_dma_unprep(msm_port->wrapper_dev, msm_port->rx_dma,
DMA_RX_BUF_SIZE);
+ msm_port->rx_dma = (dma_addr_t)NULL;
+
rx_bytes = geni_read_reg_nolog(uport->membase, SE_DMA_RX_LEN_IN);
if (unlikely(!msm_port->rx_buf)) {
IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: NULL Rx_buf\n",
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 64ed834..719fcbf 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1173,6 +1173,9 @@
device_property_read_u32(dev, "snps,xhci-imod-value",
&dwc->xhci_imod_value);
+ dwc->core_id = -1;
+ device_property_read_u32(dev, "usb-core-id", &dwc->core_id);
+
dwc->usb3_lpm_capable = device_property_read_bool(dev,
"snps,usb3_lpm_capable");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 68a40f9..a8400dd 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -954,6 +954,7 @@
* increments or 0 to disable.
* @create_reg_debugfs: create debugfs entry to allow dwc3 register dump
* @xhci_imod_value: imod value to use with xhci
+ * @core_id: usb core id to differentiate different controller
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -1150,6 +1151,7 @@
struct dwc3_gadget_events dbg_gadget_events;
bool create_reg_debugfs;
u32 xhci_imod_value;
+ int core_id;
};
/* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
index 3860a1a..44c082a 100644
--- a/drivers/usb/dwc3/dbm.c
+++ b/drivers/usb/dwc3/dbm.c
@@ -450,7 +450,7 @@
}
-int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, unsigned long addr,
u32 size, u8 dst_pipe_idx)
{
u8 dbm_ep = dst_pipe_idx;
diff --git a/drivers/usb/dwc3/dbm.h b/drivers/usb/dwc3/dbm.h
index 260afc2..d8e1ce9 100644
--- a/drivers/usb/dwc3/dbm.h
+++ b/drivers/usb/dwc3/dbm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -63,7 +63,7 @@
int dbm_get_num_of_eps_configured(struct dbm *dbm);
int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
int size);
-int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, unsigned long addr,
u32 size, u8 dst_pipe_idx);
void dbm_set_speed(struct dbm *dbm, bool speed);
void dbm_enable(struct dbm *dbm);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index b022e54..5c70da8 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -461,7 +461,7 @@
* @size - size of data fifo.
*
*/
-int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr,
u32 size, u8 dst_pipe_idx)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 3f79aa4..8b159c3 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -52,7 +52,7 @@
return irq;
}
-#define NUMBER_OF_PROPS 4
+#define NUMBER_OF_PROPS 5
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[NUMBER_OF_PROPS];
@@ -62,6 +62,7 @@
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
struct property_entry imod_prop;
+ struct property_entry core_id_prop;
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
@@ -112,6 +113,15 @@
props[prop_idx++] = imod_prop;
}
+ if (dwc->core_id >= 0) {
+ core_id_prop.name = "usb-core-id";
+ core_id_prop.length = sizeof(u32);
+ core_id_prop.is_string = false;
+ core_id_prop.is_array = false;
+ core_id_prop.value.u32_data = dwc->core_id;
+ props[prop_idx++] = core_id_prop;
+ }
+
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* where Port Disable command doesn't work.
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b040fdd..31c1dd2b 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -182,6 +182,9 @@
config USB_F_RNDIS
tristate
+config USB_F_QCRNDIS
+ tristate
+
config USB_F_MASS_STORAGE
tristate
@@ -312,6 +315,14 @@
On hardware that can't implement the full protocol,
a simple CDC subset is used, placing fewer demands on USB.
+config USB_CONFIGFS_QCRNDIS
+ bool "QCRNDIS"
+ depends on USB_CONFIGFS
+ depends on RNDIS_IPA
+ depends on NET
+ select USB_U_ETHER
+ select USB_F_QCRNDIS
+
config USB_CONFIGFS_RNDIS
bool "RNDIS"
depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 960c2cc..90c426b 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -64,3 +64,5 @@
obj-$(CONFIG_USB_F_GSI) += usb_f_gsi.o
usb_f_qdss-y := f_qdss.o u_qdss.o
obj-$(CONFIG_USB_F_QDSS) += usb_f_qdss.o
+usb_f_qcrndis-y := f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 0000000..a8e7092
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1580 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "u_data_ipa.h"
+#include <linux/rndis_ipa.h>
+#include "configfs.h"
+
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, 0644);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+ "Max size of bus transfer to host");
+
+static struct class *rndis_class;
+static dev_t rndis_dev;
+static DEFINE_IDA(chardev_ida);
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet. The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex. Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short: it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets. Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data. The control model is built around
+ * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored). RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface. That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely. Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ * - Power management ... references data that's scattered around lots
+ * of other documentation, which is incorrect/incomplete there too.
+ *
+ * - There are various undocumented protocol requirements, like the need
+ * to send garbage in some control-OUT messages.
+ *
+ * - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+struct f_rndis_qc {
+ struct usb_function func;
+ u8 ctrl_id, data_id;
+ u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ u8 ul_max_pkt_per_xfer;
+ u8 pkt_alignment_factor;
+ u32 max_pkt_size;
+ const char *manufacturer;
+ struct rndis_params *params;
+ atomic_t ioctl_excl;
+ atomic_t open_excl;
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ atomic_t notify_count;
+ struct gadget_ipa_port bam_port;
+ struct cdev cdev;
+ struct device *dev;
+ u8 port_num;
+ u16 cdc_filter;
+ bool net_ready_trigger;
+};
+
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct f_rndis_qc *rndis);
+
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+ return container_of(f, struct f_rndis_qc, func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT 8 /* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS 1
+#define RNDIS_QC_ACTIVE_PORT 0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR 4
+
+#define RNDIS_QC_IOCTL_MAGIC 'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE _IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows*/
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+ .bLength = sizeof(rndis_qc_control_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x03,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+ .bLength = sizeof(rndis_qc_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+ .bLength = sizeof(rndis_qc_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+
+ .bmCapabilities = 0x00,
+ .bDataInterface = 0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+ .bLength = sizeof(rndis_qc_acm_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+
+ .bmCapabilities = 0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+ .bLength = sizeof(rndis_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+ .bLength = sizeof(rndis_qc_data_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+ .bLength = sizeof(rndis_qc_iad_descriptor),
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bFunctionSubClass = 0x01,
+ .bFunctionProtocol = 0x03,
+ /* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+ .bLength = sizeof(ss_intr_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof(rndis_qc_ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+ (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+ &rndis_qc_string_table,
+ NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1)
+ return 0;
+
+ atomic_dec(excl);
+ return -EBUSY;
+}
+
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void rndis_qc_response_available(void *_rndis)
+{
+ struct f_rndis_qc *rndis = _rndis;
+ struct usb_request *req = rndis->notify_req;
+ __le32 *data = req->buf;
+ int status;
+
+ if (atomic_inc_return(&rndis->notify_count) != 1)
+ return;
+
+ if (!rndis->notify->driver_data)
+ return;
+
+ /* Send RNDIS RESPONSE_AVAILABLE notification; a
+ * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+ *
+ * This is the only notification defined by RNDIS.
+ */
+ data[0] = cpu_to_le32(1);
+ data[1] = cpu_to_le32(0);
+
+ status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+ if (status) {
+ atomic_dec(&rndis->notify_count);
+ pr_info("notify/0 --> %d\n", status);
+ }
+}
+
+static void rndis_qc_response_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_rndis_qc *rndis;
+ int status = req->status;
+ struct usb_composite_dev *cdev;
+ struct usb_ep *notify_ep;
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ if (!rndis->func.config || !rndis->func.config->cdev) {
+ pr_err("%s(): cdev or config is NULL.\n", __func__);
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ cdev = rndis->func.config->cdev;
+
+ /* after TX:
+ * - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+ * - RNDIS_RESPONSE_AVAILABLE (status/irq)
+ */
+ switch (status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ atomic_set(&rndis->notify_count, 0);
+ goto out;
+ default:
+ pr_info("RNDIS %s response error %d, %d/%d\n",
+ ep->name, status,
+ req->actual, req->length);
+ /* FALLTHROUGH */
+ case 0:
+ if (ep != rndis->notify)
+ goto out;
+
+ /* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+ * notifications by resending until we're done
+ */
+ if (atomic_dec_and_test(&rndis->notify_count))
+ goto out;
+ notify_ep = rndis->notify;
+ spin_unlock(&rndis_lock);
+ status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock(&rndis_lock);
+ if (!_rndis_qc)
+ goto out;
+ atomic_dec(&_rndis_qc->notify_count);
+ DBG(cdev, "notify/1 --> %d\n", status);
+ spin_unlock(&rndis_lock);
+ }
+ }
+
+ return;
+
+out:
+ spin_unlock(&rndis_lock);
+}
+
+static void rndis_qc_command_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_rndis_qc *rndis;
+ int status;
+ rndis_init_msg_type *buf;
+ u32 ul_max_xfer_size, dl_max_xfer_size;
+
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error %d\n",
+ __func__, req->status);
+ return;
+ }
+
+ spin_lock(&rndis_lock);
+ rndis = _rndis_qc;
+ if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+ spin_unlock(&rndis_lock);
+ return;
+ }
+
+ /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+ status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+ if (status < 0)
+ pr_err("RNDIS command error %d, %d/%d\n",
+ status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+
+ if (buf->MessageType == RNDIS_MSG_INIT) {
+ ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+ ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
+ /*
+ * For consistent data throughput from IPA, it is required to
+ * fine tune aggregation byte limit as 7KB. RNDIS IPA driver
+	 * uses this value to calculate the aggregation byte limit
+ * and program IPA hardware for aggregation.
+ * Host provides 8KB or 16KB as Max Transfer size, hence select
+ * minimum out of host provided value and optimum transfer size
+ * to get 7KB as aggregation byte limit.
+ */
+ if (rndis_dl_max_xfer_size)
+ dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+ rndis_get_dl_max_xfer_size(rndis->params));
+ else
+ dl_max_xfer_size =
+ rndis_get_dl_max_xfer_size(rndis->params);
+ ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
+ }
+ spin_unlock(&rndis_lock);
+}
+
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ pr_debug("%s: Enter\n", __func__);
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* RNDIS uses the CDC command encapsulation mechanism to implement
+ * an RPC scheme, with much getting/setting of attributes by OID.
+ */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ if (w_value || w_index != rndis->ctrl_id)
+ goto invalid;
+ /* read the request; process it later */
+ value = w_length;
+ req->complete = rndis_qc_command_complete;
+ /* later, rndis_response_available() sends a notification */
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ if (w_value || w_index != rndis->ctrl_id)
+ goto invalid;
+ else {
+ u8 *buf;
+ u32 n;
+
+ /* return the result */
+ buf = rndis_get_next_response(rndis->params, &n);
+ if (buf) {
+ memcpy(req->buf, buf, n);
+ req->complete = rndis_qc_response_complete;
+ rndis_free_response(rndis->params, buf);
+ value = n;
+ }
+ /* else stalls ... spec says to avoid that */
+ }
+ break;
+
+ default:
+invalid:
+ VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->context = rndis;
+ req->zero = (value < w_length);
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("rndis response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+ struct net_device *net_dev;
+
+ net_dev = dev_get_by_name(&init_net, netname);
+ if (!net_dev)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Decrement net_dev refcount as it was incremented in
+ * dev_get_by_name().
+ */
+ dev_put(net_dev);
+ return net_dev;
+}
+
+static int rndis_qc_set_alt(struct usb_function *f, unsigned int intf,
+						unsigned int alt)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct f_rndis_qc_opts *opts;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int src_connection_idx;
+	int dst_connection_idx;
+	enum usb_ctrl usb_bam_type;
+	int ret = -EINVAL;
+
+	/* we know alt == 0 */
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device *net;
+
+		rndis->net_ready_trigger = false;
+		if (rndis->bam_port.in->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			/* bam_port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			ipa_data_disconnect(&rndis->bam_port,
+						USB_IPA_FUNC_RNDIS);
+		}
+
+		if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					rndis->bam_port.in) ||
+				config_ep_by_speed(cdev->gadget, f,
+					rndis->bam_port.out)) {
+				rndis->bam_port.in->desc = NULL;
+				rndis->bam_port.out->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated. It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time. We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->cdc_filter = 0;
+
+		rndis->bam_port.cdev = cdev;
+		rndis->bam_port.func = &rndis->func;
+		ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+		usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+		src_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+			rndis->port_num);
+		dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+			rndis->port_num);
+		if (src_connection_idx < 0 || dst_connection_idx < 0) {
+			pr_err("%s: usb_bam_get_connection_idx failed\n",
+				__func__);
+			return ret;
+		}
+		if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				src_connection_idx, dst_connection_idx))
+			goto fail;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		rndis_qc_open(rndis);
+		net = rndis_qc_get_net("rndis0");
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+		opts->net = net;
+
+		rndis_set_param_dev(rndis->params, net,
+				&rndis->cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * rndis_qc_disable() - deactivate the function on disconnect/reset.
+ * Resets the RNDIS state machine under rndis_lock, tears down the IPA
+ * data path, unconfigures the BAM endpoints, and disables the notify
+ * endpoint. No-op if the function was never activated.
+ */
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+
+	/* notify->driver_data is set only once set_alt() has run */
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis_uninit(rndis->params);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	/* IPA must be disconnected before the endpoints are unconfigured */
+	ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
+
+	msm_ep_unconfig(rndis->bam_port.out);
+	msm_ep_unconfig(rndis->bam_port.in);
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*
+ * rndis_qc_suspend() - handle USB bus suspend.
+ *
+ * Determines whether remote wakeup is allowed (per-function wakeup for
+ * SuperSpeed, device-level capability otherwise). If it is not, flow
+ * control is asserted explicitly because a Linux host does not send
+ * RNDIS_MESSAGE_PACKET_FILTER before suspending. Finally the IPA data
+ * path is suspended.
+ */
+static void rndis_qc_suspend(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	/* fixed: format string previously ended "\n:" (stray colon) */
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+		__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		/* This is required as Linux host side RNDIS driver doesn't
+		 * send RNDIS_MESSAGE_PACKET_FILTER before suspending USB bus.
+		 * Hence we perform same operations explicitly here for Linux
+		 * host case. In case of windows, this RNDIS state machine is
+		 * already updated due to receiving of PACKET_FILTER.
+		 */
+		rndis_flow_control(rndis->params, true);
+		pr_debug("%s(): Disconnecting\n", __func__);
+	}
+
+	ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+	pr_debug("rndis suspended\n");
+}
+
+/*
+ * rndis_qc_resume() - handle USB bus resume.
+ * Mirror of rndis_qc_suspend(): resumes the IPA data path and, when
+ * remote wakeup was not allowed, re-opens the link and releases flow
+ * control explicitly (a Linux host sends no RNDIS message after resume).
+ */
+static void rndis_qc_resume(struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	pr_debug("%s: rndis resumed\n", __func__);
+
+	/* Nothing to do if DATA interface wasn't initialized */
+	if (!rndis->bam_port.cdev) {
+		pr_debug("data interface was not up\n");
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		rndis_qc_open(rndis);
+		/*
+		 * A Linux host doesn't send RNDIS_MSG_INIT or a non-zero
+		 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume,
+		 * so trigger USB IPA transfer functionality explicitly here.
+		 * The Windows host case is handled by the RNDIS state
+		 * machine instead.
+		 */
+		rndis_flow_control(rndis->params, false);
+	}
+
+	pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested. A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+/*
+ * rndis_qc_open() - push "link up" into the RNDIS state machine.
+ * Reports the current bitrate (scaled to units of 100 bps) and signals
+ * a connect event toward the host.
+ */
+static void rndis_qc_open(struct f_rndis_qc *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->params);
+}
+
+/*
+ * ipa_data_flow_control_enable() - RNDIS flow-control hook.
+ * Stops the RNDIS-IPA pipeline when flow control is enabled and
+ * restarts it when flow control is released. @param is unused.
+ */
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
+{
+	if (!enable)
+		ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
+	else
+		ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+/*
+ * rndis_qc_bind() - bind the RNDIS function to a configuration.
+ *
+ * Allocates string IDs, interface IDs, endpoints (in/out/notify), the
+ * notification request, and speed-specific descriptor copies, then
+ * registers with the RNDIS protocol layer and programs the initial
+ * link parameters. Returns 0 on success or a negative errno; on
+ * failure everything acquired so far is released.
+ */
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+	struct rndis_params *params;
+	int status;
+	struct usb_ep *ep;
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[0].id = status;
+		rndis_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[1].id = status;
+		rndis_qc_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[2].id = status;
+		rndis_qc_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE: a status/notification endpoint is, strictly speaking,
+	 * optional. We don't treat it that way though! It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	params = rndis_register(rndis_qc_response_available, rndis,
+			ipa_data_flow_control_enable);
+	/*
+	 * rndis_register() returns a pointer; "params < 0" could never
+	 * be true. Use IS_ERR()/PTR_ERR() to detect failure.
+	 */
+	if (IS_ERR(params)) {
+		status = PTR_ERR(params);
+		goto fail;
+	}
+	rndis->params = params;
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->params, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+		rndis_set_param_vendor(rndis->params, rndis->vendorID,
+			rndis->manufacturer))
+		goto fail;
+
+	pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+			rndis->ul_max_pkt_per_xfer);
+	rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets QC device will request
+	 * aliment to 4 (2^2).
+	 */
+	pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+			rndis->pkt_alignment_factor);
+	rndis_set_pkt_alignment_factor(rndis->params,
+			rndis->pkt_alignment_factor);
+
+	/* NOTE: all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->bam_port.in->name, rndis->bam_port.out->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints; the bam_port
+	 * pointers are still NULL if endpoint autoconfig failed early
+	 */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->bam_port.out && rndis->bam_port.out->desc)
+		rndis->bam_port.out->driver_data = NULL;
+	if (rndis->bam_port.in && rndis->bam_port.in->desc)
+		rndis->bam_port.in->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+/* free_func callback: drop the instance refcount taken at function alloc */
+static void rndis_qc_free(struct usb_function *f)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	opts->refcnt--;
+}
+
+/*
+ * rndis_qc_unbind() - undo rndis_qc_bind().
+ * Deregisters from the RNDIS protocol layer, frees descriptor copies
+ * and the notification request, then tears down the RNDIS-IPA context
+ * after flushing any pending IPA work.
+ */
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+
+	pr_debug("rndis_qc_unbind: free\n");
+	rndis_deregister(rndis->params);
+
+	/* NOTE(review): ss_descriptors allocated in bind() are not freed
+	 * here — verify whether the composite core releases them.
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	/*
+	 * call flush_workqueue to make sure that any pending
+	 * disconnect_work() from u_bam_data.c file is being
+	 * flushed before calling this rndis_ipa_cleanup API
+	 * as rndis ipa disconnect API is required to be
+	 * called before this.
+	 */
+	ipa_data_flush_workqueue();
+	rndis_ipa_cleanup(rndis_ipa_params.private);
+	rndis_ipa_supported = false;
+
+}
+
+/*
+ * rndis_ipa_reset_trigger() - clear the "network ready" trigger so the
+ * next rndis_net_ready_notify() call restarts the IPA RX/TX path.
+ */
+void rndis_ipa_reset_trigger(void)
+{
+	struct f_rndis_qc *dev = _rndis_qc;
+
+	if (!dev) {
+		pr_err("%s: No RNDIS instance", __func__);
+		return;
+	}
+
+	dev->net_ready_trigger = false;
+}
+
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests
+ */
+/*
+ * Callback let RNDIS_IPA trigger us when network interface is up
+ * and userspace is ready to answer DHCP requests.
+ *
+ * Sets net_ready_trigger exactly once (subsequent calls are rejected
+ * until rndis_ipa_reset_trigger() clears it) and kicks off the IPA
+ * RX/TX path. rndis_lock is dropped before ipa_data_start_rx_tx() —
+ * that call must not run under the spinlock.
+ */
+void rndis_net_ready_notify(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+	if (rndis->net_ready_trigger) {
+		pr_err("%s: Already triggered", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	pr_debug("%s: Set net_ready_trigger", __func__);
+	rndis->net_ready_trigger = true;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
+}
+
+/**
+ * rndis_qc_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host
+ * side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+
+/*
+ * rndis_qc_bind_config_vendor() - configure and hand out the RNDIS
+ * usb_function for this configfs instance.
+ *
+ * Fills in MAC addresses, aggregation/alignment parameters, and the
+ * usb_function callbacks, then initializes the RNDIS-IPA layer.
+ * Returns the function pointer on success, ERR_PTR() on failure.
+ * The f_rndis_qc object itself is owned by the instance (allocated in
+ * qcrndis_set_inst_name(), freed in qcrndis_free_inst()).
+ */
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+				u32 vendorID, const char *manufacturer,
+				u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc *rndis;
+	int status;
+
+	opts->refcnt++;
+	rndis = opts->rndis;
+
+	rndis->vendorID = opts->vendor_id;
+	rndis->manufacturer = opts->manufacturer;
+	/* export host's Ethernet address in CDC format */
+	random_ether_addr(rndis_ipa_params.host_ethaddr);
+	random_ether_addr(rndis_ipa_params.device_ethaddr);
+	pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+		rndis_ipa_params.host_ethaddr,
+		rndis_ipa_params.device_ethaddr);
+	rndis_ipa_supported = true;
+	ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+	rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+
+	/* if max_pkt_per_xfer was not configured set to default value */
+	rndis->ul_max_pkt_per_xfer =
+			max_pkt_per_xfer ? max_pkt_per_xfer :
+			DEFAULT_MAX_PKT_PER_XFER;
+	ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+	/*
+	 * Check no RNDIS aggregation, and alignment if not mentioned,
+	 * use alignment factor as zero. If aggregated RNDIS data transfer,
+	 * max packet per transfer would be default if it is not set
+	 * explicitly, and same way use alignment factor as 2 by default.
+	 * This would eliminate need of writing to sysfs if default RNDIS
+	 * aggregation setting required. Writing to both sysfs entries,
+	 * those values will always override default values.
+	 */
+	if ((rndis->pkt_alignment_factor == 0) &&
+			(rndis->ul_max_pkt_per_xfer == 1))
+		rndis->pkt_alignment_factor = 0;
+	else
+		rndis->pkt_alignment_factor = pkt_alignment_factor ?
+				pkt_alignment_factor :
+				DEFAULT_PKT_ALIGNMENT_FACTOR;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->cdc_filter = 0;
+
+	rndis->func.name = "rndis";
+	rndis->func.strings = rndis_qc_strings;
+	/* descriptors are per-instance copies */
+	rndis->func.bind = rndis_qc_bind;
+	rndis->func.unbind = rndis_qc_unbind;
+	rndis->func.set_alt = rndis_qc_set_alt;
+	rndis->func.setup = rndis_qc_setup;
+	rndis->func.disable = rndis_qc_disable;
+	rndis->func.suspend = rndis_qc_suspend;
+	rndis->func.resume = rndis_qc_resume;
+	rndis->func.free_func = rndis_qc_free;
+
+	status = rndis_ipa_init(&rndis_ipa_params);
+	if (status) {
+		pr_err("%s: failed to init rndis_ipa\n", __func__);
+		goto fail;
+	}
+
+	_rndis_qc = rndis;
+
+	return &rndis->func;
+fail:
+	/*
+	 * Do NOT kfree(rndis) here: it is owned by opts and freed in
+	 * qcrndis_free_inst(); freeing it here caused a double free.
+	 * Just drop the reference taken above.
+	 */
+	opts->refcnt--;
+	_rndis_qc = NULL;
+	return ERR_PTR(status);
+}
+
+/* configfs alloc callback: bind with all-default vendor parameters */
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+	return rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
+}
+
+/*
+ * rndis_qc_open_dev() - open() handler for the android_rndis_qc node.
+ * Enforces single-open via the open_excl flag; fails with -ENODEV if
+ * the RNDIS instance does not exist yet.
+ */
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	pr_info("Open rndis QC driver\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not created yet\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+		pr_err("Already opened\n");
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	fp->private_data = _rndis_qc;
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	if (!ret)
+		pr_info("rndis QC file opened\n");
+
+	return ret;
+}
+
+/*
+ * rndis_qc_release_dev() - release() handler for the char device.
+ * Clears the single-open flag; -ENODEV if the instance is gone.
+ */
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	pr_info("Close rndis QC file\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+	} else {
+		rndis_qc_unlock(&_rndis_qc->open_excl);
+	}
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	return ret;
+}
+
+/*
+ * rndis_qc_ioctl() - export UL aggregation parameters to userspace.
+ *
+ * Supported commands:
+ *   RNDIS_QC_GET_MAX_PKT_PER_XFER - max UL packets per transfer (u8)
+ *   RNDIS_QC_GET_MAX_PKT_SIZE     - max UL packet size (u32)
+ *
+ * The values are snapshotted under rndis_lock, the lock is dropped for
+ * the copy_to_user() (which may fault/sleep), then re-taken to release
+ * ioctl_excl. The instance is re-checked after re-acquiring the lock
+ * because it may have been freed in between.
+ */
+static long rndis_qc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	u8 qc_max_pkt_per_xfer = 0;
+	u32 qc_max_pkt_size = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+	qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+	if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	pr_info("Received command %d\n", cmd);
+
+	switch (cmd) {
+	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_per_xfer,
+					sizeof(qc_max_pkt_per_xfer));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent UL max packets per xfer %d\n",
+				qc_max_pkt_per_xfer);
+		break;
+	case RNDIS_QC_GET_MAX_PKT_SIZE:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_size,
+					sizeof(qc_max_pkt_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_debug("Sent max packet size %d\n",
+				qc_max_pkt_size);
+		break;
+	default:
+		pr_err("Unsupported IOCTL\n");
+		ret = -EINVAL;
+	}
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return ret;
+}
+
+/* char-device ops for the android_rndis_qc node (ioctl-only control) */
+static const struct file_operations rndis_qc_fops = {
+	.owner = THIS_MODULE,
+	.open = rndis_qc_open_dev,
+	.release = rndis_qc_release_dev,
+	.unlocked_ioctl	= rndis_qc_ioctl,
+};
+
+/*
+ * qcrndis_free_inst() - undo qcrndis_set_inst_name().
+ * Tears down the char device, class, minor number, and IPA resources,
+ * then frees the f_rndis_qc object (the single owner of that memory)
+ * and the opts under rndis_lock.
+ *
+ * NOTE(review): kfree() while holding a spinlock is legal but unusual;
+ * the lock appears to serialize against readers of _rndis_qc — confirm.
+ */
+static void qcrndis_free_inst(struct usb_function_instance *f)
+{
+	struct f_rndis_qc_opts	*opts = container_of(f,
+				struct f_rndis_qc_opts, func_inst);
+	int minor = MINOR(opts->rndis->cdev.dev);
+	unsigned long flags;
+
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+	class_destroy(rndis_class);
+	cdev_del(&opts->rndis->cdev);
+	ida_simple_remove(&chardev_ida, minor);
+	unregister_chrdev_region(rndis_dev, 1);
+
+	ipa_data_free(USB_IPA_FUNC_RNDIS);
+	spin_lock_irqsave(&rndis_lock, flags);
+	kfree(opts->rndis);
+	_rndis_qc = NULL;
+	kfree(opts);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * qcrndis_set_inst_name() - create per-instance state for "rndis_bam".
+ *
+ * Allocates the f_rndis_qc object, creates the usbrndis class, char
+ * device region, device node and cdev, and sets up the IPA data path.
+ * Returns 0 on success or a negative errno; on failure all resources
+ * acquired so far are released in reverse order.
+ */
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+			struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc *rndis;
+	int name_len;
+	int ret, minor;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	pr_debug("initialize rndis QC instance\n");
+	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+	if (!rndis) {
+		pr_err("%s: fail allocate and initialize new instance\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&rndis_lock);
+	opts->rndis = rndis;
+	/* class_create() returns ERR_PTR() on failure - must be checked */
+	rndis_class = class_create(THIS_MODULE, "usbrndis");
+	if (IS_ERR(rndis_class)) {
+		ret = PTR_ERR(rndis_class);
+		pr_err("Fail to create usbrndis class (%d)\n", ret);
+		goto fail_free_rndis;
+	}
+	ret = alloc_chrdev_region(&rndis_dev, 0, 1, "usb_rndis");
+	if (ret < 0) {
+		pr_err("Fail to allocate usb rndis char dev region\n");
+		goto fail_destroy_class;
+	}
+
+	/* get a minor number */
+	minor = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+			minor);
+		ret = -ENODEV;
+		goto fail_out_of_minors;
+	}
+	rndis->dev = device_create(rndis_class, NULL,
+			MKDEV(MAJOR(rndis_dev), minor),
+			rndis, "android_rndis_qc");
+	if (IS_ERR(rndis->dev)) {
+		ret = PTR_ERR(rndis->dev);
+		pr_err("%s: device_create failed for (%d)", __func__, ret);
+		goto fail_return_minor;
+	}
+	cdev_init(&rndis->cdev, &rndis_qc_fops);
+	ret = cdev_add(&rndis->cdev, MKDEV(MAJOR(rndis_dev), minor), 1);
+	if (ret < 0) {
+		pr_err("%s: cdev_add failed for %s (%d)", __func__,
+			name, ret);
+		goto fail_cdev_add;
+	}
+
+	ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		goto fail_data_setup;
+	}
+
+	return 0;
+fail_data_setup:
+	cdev_del(&rndis->cdev);
+fail_cdev_add:
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+fail_return_minor:
+	ida_simple_remove(&chardev_ida, minor);
+fail_out_of_minors:
+	unregister_chrdev_region(rndis_dev, 1);
+fail_destroy_class:
+	class_destroy(rndis_class);
+fail_free_rndis:
+	opts->rndis = NULL;
+	kfree(rndis);
+	return ret;
+}
+
+/* map a configfs item back to its f_rndis_qc_opts container */
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_rndis_qc_opts,
+				func_inst.group);
+}
+
+/* configfs item release: drop the function-instance reference */
+static void qcrndis_attr_release(struct config_item *item)
+{
+	struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+/* configfs item operations: only release is needed */
+static struct configfs_item_operations qcrndis_item_ops = {
+	.release        = qcrndis_attr_release,
+};
+
+/* configfs item type for the rndis_bam function group */
+static struct config_item_type qcrndis_func_type = {
+	.ct_item_ops    = &qcrndis_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+/*
+ * qcrndis_alloc_inst() - allocate a configfs function instance.
+ * Wires up set_inst_name/free_func_inst and initializes the configfs
+ * group; the heavy setup happens later in qcrndis_set_inst_name().
+ */
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+	opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				&qcrndis_func_type);
+
+	return &opts->func_inst;
+}
+
+/* accessor: IPA RX-notify callback registered by the RNDIS-IPA layer */
+void *rndis_qc_get_ipa_rx_cb(void)
+{
+	return rndis_ipa_params.ipa_rx_notify;
+}
+
+/* accessor: IPA TX-notify callback registered by the RNDIS-IPA layer */
+void *rndis_qc_get_ipa_tx_cb(void)
+{
+	return rndis_ipa_params.ipa_tx_notify;
+}
+
+/* accessor: opaque private context of the RNDIS-IPA layer */
+void *rndis_qc_get_ipa_priv(void)
+{
+	return rndis_ipa_params.private;
+}
+
+/* accessor: whether endpoint configuration should be skipped for IPA */
+bool rndis_qc_get_skip_ep_config(void)
+{
+	return rndis_ipa_params.skip_ep_cfg;
+}
+
+/* declares the "rndis_bam" usb_function with its alloc_inst/alloc hooks */
+DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
+
+/*
+ * usb_qcrndis_init() - module init: register the rndis_bam function
+ * with the gadget function framework.
+ */
+static int __init usb_qcrndis_init(void)
+{
+	int ret;
+
+	ret = usb_function_register(&rndis_bamusb_func);
+	if (ret) {
+		/* fixed: message previously said "diag" (copy/paste) */
+		pr_err("%s: failed to register rndis %d\n", __func__, ret);
+		return ret;
+	}
+	return ret;
+}
+
+/* module exit: unregister the rndis_bam function */
+static void __exit usb_qcrndis_exit(void)
+{
+	usb_function_unregister(&rndis_bamusb_func);
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+/* fixed: description said "RMNET" (copy/paste from the rmnet driver) */
+MODULE_DESCRIPTION("USB RNDIS Function Driver");
+/* matches the GPL v2 notice in the file header; avoids kernel taint */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index 4ba2e9b..72edb90 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -31,6 +31,9 @@
u32 peer_pipe_idx;
unsigned long usb_bam_handle;
struct sps_mem_buffer *data_fifo;
+ unsigned long qdss_bam_iova;
+ phys_addr_t qdss_bam_phys;
+ u32 qdss_bam_size;
};
struct gqdss {
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ac2231a..5d8e6fa 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -596,6 +596,7 @@
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
+ params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
params->resp_avail(params->v);
return 0;
}
@@ -1015,6 +1016,18 @@
}
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->dl_max_xfer_size;
+}
+
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+ pr_debug("%s:\n", __func__);
+ return params->ul_max_xfer_size;
+}
+
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
{
pr_debug("%s:\n", __func__);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 4ffc282..a3051c4 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -194,6 +194,7 @@
u32 host_rndis_major_ver;
u32 host_rndis_minor_ver;
u32 dl_max_xfer_size;
+ u32 ul_max_xfer_size;
const char *vendorDescr;
u8 pkt_alignment_factor;
void (*resp_avail)(void *v);
@@ -216,6 +217,8 @@
int rndis_set_param_medium(struct rndis_params *params, u32 medium,
u32 speed);
void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params);
void rndis_add_hdr(struct sk_buff *skb);
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 0000000..f379028
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,1402 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "u_data_ipa.h"
+#include "u_rmnet.h"
+
+struct ipa_data_ch_info {
+ struct usb_request *rx_req;
+ struct usb_request *tx_req;
+ unsigned long flags;
+ unsigned int id;
+ enum ipa_func_type func_type;
+ bool is_connected;
+ unsigned int port_num;
+ spinlock_t port_lock;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
+
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
+ u8 src_connection_idx;
+ u8 dst_connection_idx;
+ enum usb_ctrl usb_bam_type;
+ struct gadget_ipa_port *port_usb;
+ struct usb_gadget *gadget;
+ atomic_t pipe_connect_notified;
+ struct usb_bam_connect_ipa_params ipa_params;
+};
+
+struct rndis_data_ch_info {
+ /* this provides downlink (device->host i.e host) side configuration*/
+ u32 dl_max_transfer_size;
+ /* this provides uplink (host->device i.e device) side configuration */
+ u32 ul_max_transfer_size;
+ u32 ul_max_packets_number;
+ bool ul_aggregation_enable;
+ u32 prod_clnt_hdl;
+ u32 cons_clnt_hdl;
+ void *priv;
+};
+
+static struct workqueue_struct *ipa_data_wq;
+struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happen
+ * @req: USB endless request
+ *
+ * This completion is being called when endless (TX/RX) transfer is terminated
+ * i.e. disconnect or suspend case.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ pr_debug("%s: endless complete for(%s) with status: %d\n",
+ __func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - configure USB endpoint and
+ * queue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX)
+ *
+ * It is being used to queue endless TX/RX request with UDC driver.
+ * It does set required DBM endpoint configuration before queueing endless
+ * TX/RX request.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+ unsigned long flags;
+ int status;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || (in && !port->tx_req)
+ || (!in && !port->rx_req)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): port_usb/req is NULL.\n", __func__);
+ return;
+ }
+
+ if (in)
+ ep = port->port_usb->in;
+ else
+ ep = port->port_usb->out;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (in) {
+ pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+ status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing endless TX_REQ, %d\n", status);
+ } else {
+ pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+ status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC);
+ if (status)
+ pr_err("error enqueuing endless RX_REQ, %d\n", status);
+ }
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX
+ *
+ * It is being used to terminate and dequeue endless TX/RX request with UDC
+ * driver.
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+ unsigned long flags;
+ int status;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || (in && !port->tx_req)
+ || (!in && !port->rx_req)) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): port_usb/req is NULL.\n", __func__);
+ return;
+ }
+
+ if (in)
+ ep = port->port_usb->in;
+ else
+ ep = port->port_usb->out;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (in) {
+ pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+ status = usb_ep_dequeue(ep, port->tx_req);
+ if (status)
+ pr_err("error dequeueing endless TX_REQ, %d\n", status);
+ } else {
+ pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+ status = usb_ep_dequeue(ep, port->rx_req);
+ if (status)
+ pr_err("error dequeueing endless RX_REQ, %d\n", status);
+ }
+}
+
+/*
+ * Called when IPA triggers us that the network interface is up.
+ * Starts the transfers on bulk endpoints.
+ * (optimization reasons, the pipes and bam with IPA are already connected)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_ep *epin, *epout;
+
+ pr_debug("%s: Triggered: starting tx, rx", __func__);
+ /* queue in & out requests */
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL, can't start tx, rx", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb || !port->port_usb->in ||
+ !port->port_usb->out) {
+ pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ if (!port->rx_req || !port->tx_req) {
+ pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+ port->rx_req, port->tx_req);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ if (!port->is_connected) {
+ pr_debug("%s: pipes are disconnected", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ epout = port->port_usb->out;
+ epin = port->port_usb->in;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ /* queue in & out requests */
+ pr_debug("%s: Starting rx", __func__);
+ if (epout)
+ ipa_data_start_endless_xfer(port, false);
+
+ pr_debug("%s: Starting tx", __func__);
+ if (epin)
+ ipa_data_start_endless_xfer(port, true);
+}
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * It is being scheduled from the ipa_data_disconnect() API when a particular
+ * function is being disabled due to USB disconnect or a USB composition switch
+ * being triggered. This API performs disconnect of the USB BAM pipe, IPA BAM
+ * pipe and also initiates the USB IPA BAM pipe handshake for the USB
+ * disconnect sequence. Due to the handshake operation and involvement of SPS
+ * related APIs, this functionality can't be used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ disconnect_w);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("Already disconnected.\n");
+ return;
+ }
+ port->is_connected = false;
+ pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+ port->ipa_params.prod_clnt_hdl,
+ port->ipa_params.cons_clnt_hdl);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+ if (ret)
+ pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ /*
+ * NOTE: it is required to disconnect USB and IPA BAM related
+ * pipes before calling IPA tethered function related disconnect
+ * API. IPA tethered function related disconnect API delete
+ * dependency graph with IPA RM which would result in IPA not
+ * pulling data although there is pending data on USB BAM
+ * producer pipe.
+ */
+ if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+ void *priv;
+
+ priv = rndis_qc_get_ipa_priv();
+ rndis_ipa_pipe_disconnect_notify(priv);
+ }
+ }
+
+ if (port->ipa_params.prod_clnt_hdl)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ if (port->ipa_params.cons_clnt_hdl)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+
+ if (port->func_type == USB_IPA_FUNC_RMNET)
+ teth_bridge_disconnect(port->ipa_params.src_client);
+ /*
+ * Decrement usage count which was incremented
+ * upon cable connect or cable disconnect in suspended state.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @port_num: Port num used by function driver which need to be disable
+ *
+ * It is being called from atomic context from the gadget driver when a
+ * particular function is being disabled due to USB cable disconnect or a USB
+ * composition switch being triggered. This API restores USB endpoint operation
+ * and disables the USB endpoint used for the accelerated path.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_gadget *gadget = NULL;
+
+ pr_debug("dev:%pK port number:%d\n", gp, func);
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("port %u is NULL", func);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb) {
+ gadget = port->port_usb->cdev->gadget;
+ port->port_usb->ipa_consumer_ep = -1;
+ port->port_usb->ipa_producer_ep = -1;
+
+ if (port->port_usb->in) {
+ /*
+ * Disable endpoints.
+ * Unlocking is needed since disabling the eps might
+ * stop active transfers and therefore the request
+ * complete function will be called, where we try
+ * to obtain the spinlock as well.
+ */
+ msm_ep_unconfig(port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_ep_disable(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->tx_req) {
+ usb_ep_free_request(port->port_usb->in,
+ port->tx_req);
+ port->tx_req = NULL;
+ }
+ port->port_usb->in->endless = false;
+ }
+
+ if (port->port_usb->out) {
+ msm_ep_unconfig(port->port_usb->out);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_ep_disable(port->port_usb->out);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->rx_req) {
+ usb_ep_free_request(port->port_usb->out,
+ port->rx_req);
+ port->rx_req = NULL;
+ }
+ port->port_usb->out->endless = false;
+ }
+
+ port->port_usb = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures USB BAM data fifo using fetched pipe configuration
+ * using provided index value. This function needs to be used before starting
+ * endless transfer.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+ struct sps_mem_buffer data_fifo = {0};
+ u32 usb_bam_pipe_idx;
+
+ get_bam2bam_connection_info(bam_type, idx,
+ &usb_bam_pipe_idx,
+ NULL, &data_fifo, NULL);
+ msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+ usb_bam_pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * It is being scheduled from ipa_data_connect() API when a particular function
+ * which is using USB IPA accelerated path. This API performs allocating request
+ * for USB endpoint (tx/rx) for endless purpose, configure USB endpoint to be
+ * used in accelerated path, connect of USB BAM pipe, IPA BAM pipe and also
+ * initiate USB IPA BAM pipe handshake for connect sequence.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ connect_w);
+ struct gadget_ipa_port *gport;
+ struct usb_gadget *gadget = NULL;
+ struct teth_bridge_connect_params connect_params;
+ struct teth_bridge_init_params teth_bridge_params;
+ u32 sps_params;
+ int ret;
+ unsigned long flags;
+ bool is_ipa_disconnected = true;
+
+ pr_debug("%s: Connect workqueue started\n", __func__);
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ pr_err("%s(): port_usb is NULL.\n", __func__);
+ return;
+ }
+
+ gport = port->port_usb;
+ if (gport && gport->cdev)
+ gadget = gport->cdev->gadget;
+
+ if (!gadget) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ pr_err("%s: gport is NULL.\n", __func__);
+ return;
+ }
+
+ /*
+ * check if connect_w got called two times during RNDIS resume as
+ * explicit flow control is called to start data transfers after
+ * ipa_data_connect()
+ */
+ if (port->is_connected) {
+ pr_debug("IPA connect is already done & Transfers started\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+ return;
+ }
+
+ gport->ipa_consumer_ep = -1;
+ gport->ipa_producer_ep = -1;
+
+ port->is_connected = true;
+
+	/* update IPA parameters here. */
+ port->ipa_params.usb_connection_speed = gadget->speed;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.skip_ep_cfg = true;
+ port->ipa_params.keep_ipa_awake = true;
+ port->ipa_params.cons_clnt_hdl = -1;
+ port->ipa_params.prod_clnt_hdl = -1;
+
+ if (gport->out) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_alloc_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || port->rx_req == NULL) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
+ __func__);
+ goto out;
+ }
+
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ | MSM_PRODUCER | port->src_pipe_idx;
+ port->rx_req->length = 32*1024;
+ port->rx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->src_connection_idx,
+ port->port_usb->out);
+ ret = msm_ep_config(gport->out);
+ if (ret) {
+ pr_err("msm_ep_config() failed for OUT EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto out;
+ }
+ }
+
+ if (gport->in) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_alloc_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || port->tx_req == NULL) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
+ __func__);
+ goto unconfig_msm_ep_out;
+ }
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ port->dst_pipe_idx;
+ port->tx_req->length = 32*1024;
+ port->tx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, gport->in);
+ ret = msm_ep_config(gport->in);
+ if (ret) {
+ pr_err("msm_ep_config() failed for IN EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto unconfig_msm_ep_out;
+ }
+ }
+
+ if (port->func_type == USB_IPA_FUNC_RMNET) {
+ teth_bridge_params.client = port->ipa_params.src_client;
+ ret = teth_bridge_init(&teth_bridge_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_init() failed\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto unconfig_msm_ep_in;
+ }
+ }
+
+ /*
+ * Perform below operations for Tx from Device (OUT transfer)
+ * 1. Connect with pipe of USB BAM with IPA BAM pipe
+ * 2. Update USB Endpoint related information using SPS Param.
+ * 3. Configure USB Endpoint/DBM for the same.
+ * 4. Override USB ep queue functionality for endless transfer.
+ */
+ if (gport->out) {
+ pr_debug("configure bam ipa connect for USB OUT\n");
+ port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ } else if (port->func_type == USB_IPA_FUNC_RMNET) {
+ port->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ port->ipa_params.priv =
+ teth_bridge_params.private_data;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ port->ipa_params.skip_ep_cfg =
+ teth_bridge_params.skip_ep_cfg;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(port->usb_bam_type,
+ &port->ipa_params);
+ if (ret) {
+ pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ is_ipa_disconnected = false;
+ /* check if USB cable is disconnected or not */
+ if (!port->port_usb) {
+ pr_debug("%s:%d: cable is disconnected.\n",
+ __func__, __LINE__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto disconnect_usb_bam_ipa_out;
+ }
+
+ gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+ }
+
+ if (gport->in) {
+ pr_debug("configure bam ipa connect for USB IN\n");
+ port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+ port->ipa_params.priv = rndis_qc_get_ipa_priv();
+ port->ipa_params.skip_ep_cfg =
+ rndis_qc_get_skip_ep_config();
+ } else if (port->func_type == USB_IPA_FUNC_RMNET) {
+ port->ipa_params.notify =
+ teth_bridge_params.usb_notify_cb;
+ port->ipa_params.priv =
+ teth_bridge_params.private_data;
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
+ port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+ port->ipa_params.skip_ep_cfg =
+ teth_bridge_params.skip_ep_cfg;
+ }
+
+ if (port->func_type == USB_IPA_FUNC_DPL)
+ port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = usb_bam_connect_ipa(port->usb_bam_type,
+ &port->ipa_params);
+ if (ret) {
+ pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ is_ipa_disconnected = false;
+ /* check if USB cable is disconnected or not */
+ if (!port->port_usb) {
+ pr_debug("%s:%d: cable is disconnected.\n",
+ __func__, __LINE__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto disconnect_usb_bam_ipa_out;
+ }
+
+ gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+ }
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ rndis_data->prod_clnt_hdl =
+ port->ipa_params.prod_clnt_hdl;
+ rndis_data->cons_clnt_hdl =
+ port->ipa_params.cons_clnt_hdl;
+ rndis_data->priv = port->ipa_params.priv;
+
+ pr_debug("ul_max_transfer_size:%d\n",
+ rndis_data->ul_max_transfer_size);
+ pr_debug("ul_max_packets_number:%d\n",
+ rndis_data->ul_max_packets_number);
+ pr_debug("dl_max_transfer_size:%d\n",
+ rndis_data->dl_max_transfer_size);
+
+ ret = rndis_ipa_pipe_connect_notify(
+ rndis_data->cons_clnt_hdl,
+ rndis_data->prod_clnt_hdl,
+ rndis_data->ul_max_transfer_size,
+ rndis_data->ul_max_packets_number,
+ rndis_data->dl_max_transfer_size,
+ rndis_data->priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ atomic_set(&port->pipe_connect_notified, 1);
+ } else if (port->func_type == USB_IPA_FUNC_RMNET ||
+ port->func_type == USB_IPA_FUNC_DPL) {
+ /* For RmNet and DPL need to update_ipa_pipes to qti */
+ enum qti_port_type qti_port_type = port->func_type ==
+ USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
+ gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
+ gport->ipa_producer_ep, gport->ipa_consumer_ep);
+ }
+
+ if (port->func_type == USB_IPA_FUNC_RMNET) {
+ connect_params.ipa_usb_pipe_hdl =
+ port->ipa_params.prod_clnt_hdl;
+ connect_params.usb_ipa_pipe_hdl =
+ port->ipa_params.cons_clnt_hdl;
+ connect_params.tethering_mode =
+ TETH_TETHERING_MODE_RMNET;
+ connect_params.client_type =
+ port->ipa_params.src_client;
+ ret = teth_bridge_connect(&connect_params);
+ if (ret) {
+ pr_err("%s:teth_bridge_connect() failed\n", __func__);
+ goto disconnect_usb_bam_ipa_out;
+ }
+ }
+
+ pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+ gport->ipa_producer_ep,
+ gport->ipa_consumer_ep);
+
+ pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+ port->src_connection_idx, port->dst_connection_idx);
+
+ /* Don't queue the transfers yet, only after network stack is up */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ pr_debug("%s: Not starting now, waiting for network notify",
+ __func__);
+ return;
+ }
+
+ if (gport->out)
+ ipa_data_start_endless_xfer(port, false);
+ if (gport->in)
+ ipa_data_start_endless_xfer(port, true);
+
+ pr_debug("Connect workqueue done (port %pK)", port);
+ return;
+
+disconnect_usb_bam_ipa_out:
+ if (!is_ipa_disconnected) {
+ usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+ is_ipa_disconnected = true;
+ }
+ if (port->func_type == USB_IPA_FUNC_RMNET)
+ teth_bridge_disconnect(port->ipa_params.src_client);
+unconfig_msm_ep_in:
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb && gport->in)
+ msm_ep_unconfig(port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+unconfig_msm_ep_out:
+ if (gport->in)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->dst_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb && gport->out)
+ msm_ep_unconfig(port->port_usb->out);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+ if (gport->out)
+ usb_bam_free_fifos(port->usb_bam_type,
+ port->src_connection_idx);
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->is_connected = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_gadget_autopm_put_async(port->gadget);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @port_num: port number used by accelerated function
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * It is being called from accelerated function driver (from set_alt()) to
+ * initiate USB BAM IPA connection. This API is enabling accelerated endpoints
+ * and schedule connect_work() which establishes USB IPA BAM communication.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ u8 src_connection_idx, u8 dst_connection_idx)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ int ret = 0;
+
+ pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+ gp, func, src_connection_idx, dst_connection_idx);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid portno#%d\n", func);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (!gp) {
+ pr_err("gadget port is null\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ port = ipa_data_ports[func];
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->port_usb = gp;
+ port->gadget = gp->cdev->gadget;
+
+ if (gp->out) {
+ port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC);
+ if (!port->rx_req) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s: failed to allocate rx_req\n", __func__);
+ goto err;
+ }
+ port->rx_req->context = port;
+ port->rx_req->complete = ipa_data_endless_complete;
+ port->rx_req->length = 0;
+ port->rx_req->no_interrupt = 1;
+ }
+
+ if (gp->in) {
+ port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC);
+ if (!port->tx_req) {
+ pr_err("%s: failed to allocate tx_req\n", __func__);
+ goto free_rx_req;
+ }
+ port->tx_req->context = port;
+ port->tx_req->complete = ipa_data_endless_complete;
+ port->tx_req->length = 0;
+ port->tx_req->no_interrupt = 1;
+ }
+ port->src_connection_idx = src_connection_idx;
+ port->dst_connection_idx = dst_connection_idx;
+ port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+ port->ipa_params.src_pipe = &(port->src_pipe_idx);
+ port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+ port->ipa_params.src_idx = src_connection_idx;
+ port->ipa_params.dst_idx = dst_connection_idx;
+
+ /*
+ * Disable Xfer complete and Xfer not ready interrupts by
+ * marking endless flag which is used in UDC driver to enable
+ * these interrupts. with this set, these interrupts for selected
+ * endpoints won't be enabled.
+ */
+ if (port->port_usb->in) {
+ port->port_usb->in->endless = true;
+ ret = usb_ep_enable(port->port_usb->in);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:IN ep:%pK",
+ port->port_usb->in);
+ usb_ep_free_request(port->port_usb->in, port->tx_req);
+ port->tx_req = NULL;
+ port->port_usb->in->endless = false;
+ goto err_usb_in;
+ }
+ }
+
+ if (port->port_usb->out) {
+ port->port_usb->out->endless = true;
+ ret = usb_ep_enable(port->port_usb->out);
+ if (ret) {
+ pr_err("usb_ep_enable failed eptype:OUT ep:%pK",
+ port->port_usb->out);
+ usb_ep_free_request(port->port_usb->out, port->rx_req);
+ port->rx_req = NULL;
+ port->port_usb->out->endless = false;
+ goto err_usb_out;
+ }
+ }
+
+ /* Wait for host to enable flow_control */
+ if (port->func_type == USB_IPA_FUNC_RNDIS) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = 0;
+ return ret;
+ }
+
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work (due to cable disconnect)
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+
+ queue_work(ipa_data_wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return ret;
+
+err_usb_out:
+ if (port->port_usb->in) {
+ usb_ep_disable(port->port_usb->in);
+ port->port_usb->in->endless = false;
+ }
+err_usb_in:
+ if (gp->in && port->tx_req) {
+ usb_ep_free_request(gp->in, port->tx_req);
+ port->tx_req = NULL;
+ }
+free_rx_req:
+ if (gp->out && port->rx_req) {
+ usb_ep_free_request(gp->out, port->rx_req);
+ port->rx_req = NULL;
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+ pr_debug("%s(): failed with error:%d\n", __func__, ret);
+ return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to restart USB endless transfer for USB bus resume.
+ * For USB consumer case, it restarts USB endless RX transfer, whereas
+ * for USB producer case, it resets DBM endpoint and restart USB endless
+ * TX transfer.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct ipa_data_ch_info *port = param;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+ pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ pr_debug("%s(): start endless RX\n", __func__);
+ ipa_data_start_endless_xfer(port, false);
+ } else {
+ pr_debug("%s(): start endless TX\n", __func__);
+ if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, port->port_usb->in);
+ }
+ ipa_data_start_endless_xfer(port, true);
+ }
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to stop endless Tx/Rx transfers. It is being used
+ * for USB bus suspend functionality.
+ */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+ struct ipa_data_ch_info *port = param;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+ pr_err("%s:port,cdev or gadget is NULL\n", __func__);
+ return;
+ }
+
+ gadget = port->port_usb->cdev->gadget;
+ if (dir == USB_TO_PEER_PERIPHERAL) {
+ pr_debug("%s(): stop endless RX transfer\n", __func__);
+ ipa_data_stop_endless_xfer(port, false);
+ } else {
+ pr_debug("%s(): stop endless TX transfer\n", __func__);
+ ipa_data_stop_endless_xfer(port, true);
+ }
+}
+
+void ipa_data_flush_workqueue(void)
+{
+ pr_debug("%s(): Flushing workqueue\n", __func__);
+ flush_workqueue(ipa_data_wq);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @port_num: port number used by function
+ *
+ * It is being used to initiate USB BAM IPA suspend functionality
+ * for USB bus suspend functionality.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+ pr_debug("%s: suspended port %d\n", __func__, func);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s(): Port is NULL.\n", __func__);
+ return;
+ }
+
+ /* suspend with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ /*
+ * When remote wakeup is disabled, IPA BAM is disconnected
+ * because it cannot send new data until the USB bus is resumed.
+ * Endpoint descriptors info is saved before it gets reset by
+ * the BAM disconnect API. This lets us restore this info when
+ * the USB bus is resumed.
+ */
+ if (gp->in) {
+ gp->in_ep_desc_backup = gp->in->desc;
+ pr_debug("in_ep_desc_backup = %pK\n",
+ gp->in_ep_desc_backup);
+ }
+ if (gp->out) {
+ gp->out_ep_desc_backup = gp->out->desc;
+ pr_debug("out_ep_desc_backup = %pK\n",
+ gp->out_ep_desc_backup);
+ }
+ ipa_data_disconnect(gp, func);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->suspend_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ connect_w);
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: suspend started\n", __func__);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /* In case of RNDIS, host enables flow_control invoking connect_w. If it
+ * is delayed then we may end up having suspend_w run before connect_w.
+ * In this scenario, connect_w may or may not at all start if cable gets
+ * disconnected or if host changes configuration e.g. RNDIS --> MBIM
+ * For these cases don't do runtime_put as there was no _get yet, and
+	 * detect this condition on disconnect to not do extra pm_runtime_get
+ * for SUSPEND --> DISCONNECT scenario.
+ */
+ if (!port->is_connected) {
+ pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+ ret = usb_bam_register_wake_cb(port->usb_bam_type,
+ port->dst_connection_idx, NULL, port);
+ if (ret) {
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ return;
+ }
+
+ usb_bam_register_start_stop_cbs(port->usb_bam_type,
+ port->dst_connection_idx, ipa_data_start,
+ ipa_data_stop, port);
+ /*
+ * release lock here because bam_data_start() or
+ * bam_data_stop() called from usb_bam_suspend()
+ * re-acquires port lock.
+ */
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Decrement usage count after IPA handshake is done
+ * to allow gadget parent to go to lpm. This counter was
+ * incremented upon cable connect.
+ */
+ usb_gadget_autopm_put_async(port->gadget);
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @func: USB IPA function type used by the function driver
+ *
+ * It is being used to initiate USB resume functionality
+ * for USB bus resume case.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+ struct usb_gadget *gadget = NULL;
+ u8 src_connection_idx = 0;
+ u8 dst_connection_idx = 0;
+ enum usb_ctrl usb_bam_type;
+
+ pr_debug("dev:%pK port number:%d\n", gp, func);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("invalid ipa portno#%d\n", func);
+ return;
+ }
+
+ if (!gp) {
+ pr_err("data port is null\n");
+ return;
+ }
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("port %u is NULL", func);
+ return;
+ }
+
+ gadget = gp->cdev->gadget;
+ /* resume with remote wakeup disabled */
+ if (!remote_wakeup_enabled) {
+ int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
+
+ usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ /* Restore endpoint descriptors info. */
+ if (gp->in) {
+ gp->in->desc = gp->in_ep_desc_backup;
+ pr_debug("in_ep_desc_backup = %pK\n",
+ gp->in_ep_desc_backup);
+ dst_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ if (gp->out) {
+ gp->out->desc = gp->out_ep_desc_backup;
+ pr_debug("out_ep_desc_backup = %pK\n",
+ gp->out_ep_desc_backup);
+ src_connection_idx = usb_bam_get_connection_idx(
+ usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+ USB_BAM_DEVICE, bam_pipe_num);
+ }
+ ipa_data_connect(gp, func,
+ src_connection_idx, dst_connection_idx);
+ return;
+ }
+
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Increment usage count here to disallow gadget
+ * parent suspend. This counter will decrement
+ * after IPA handshake is done in disconnect work
+ * (due to cable disconnect) or in bam_data_disconnect
+ * in suspended state.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->resume_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+ connect_w);
+ struct usb_gadget *gadget;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !port->port_usb->cdev) {
+ pr_err("port->port_usb or cdev is NULL");
+ goto exit;
+ }
+
+ if (!port->port_usb->cdev->gadget) {
+ pr_err("port->port_usb->cdev->gadget is NULL");
+ goto exit;
+ }
+
+ pr_debug("%s: resume started\n", __func__);
+ gadget = port->port_usb->cdev->gadget;
+ if (!gadget) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): Gadget is NULL.\n", __func__);
+ return;
+ }
+
+ ret = usb_bam_register_wake_cb(port->usb_bam_type,
+ port->dst_connection_idx, NULL, NULL);
+ if (ret) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_err("%s(): Failed to register BAM wake callback.\n",
+ __func__);
+ return;
+ }
+
+ if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+ configure_fifo(port->usb_bam_type, port->src_connection_idx,
+ port->port_usb->out);
+ configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+ port->port_usb->in);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ msm_dwc3_reset_dbm_ep(port->port_usb->in);
+ spin_lock_irqsave(&port->port_lock, flags);
+ usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+ }
+
+exit:
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate IPA USB Port structure
+ * @func: USB IPA function type to be used by particular USB function
+ *
+ * It is being used by USB function driver to allocate IPA data port
+ * for USB IPA data accelerated path.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+static int ipa_data_port_alloc(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port = NULL;
+
+ if (ipa_data_ports[func] != NULL) {
+ pr_debug("port %d already allocated.\n", func);
+ return 0;
+ }
+
+ port = kzalloc(sizeof(struct ipa_data_ch_info), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ipa_data_ports[func] = port;
+
+ pr_debug("port:%pK with portno:%d allocated\n", port, func);
+ return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @func: USB gadget function type to be used by particular
+ * USB function
+ *
+ * It is being used by USB function driver to select which BAM2BAM IPA
+ * port particular USB function wants to use.
+ *
+ */
+void ipa_data_port_select(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port = NULL;
+
+ pr_debug("portno:%d\n", func);
+
+ port = ipa_data_ports[func];
+ port->port_num = func;
+ port->is_connected = false;
+
+ spin_lock_init(&port->port_lock);
+
+ if (!work_pending(&port->connect_w))
+ INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+ if (!work_pending(&port->disconnect_w))
+ INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
+ port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+ port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+ port->func_type = func;
+};
+
+void ipa_data_free(enum ipa_func_type func)
+{
+ pr_debug("freeing %d IPA BAM port", func);
+
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
+ if (func == USB_IPA_FUNC_RNDIS)
+ kfree(rndis_data);
+ if (ipa_data_wq) {
+ destroy_workqueue(ipa_data_wq);
+ ipa_data_wq = NULL;
+ }
+}
+
+/**
+ * ipa_data_setup() - setup BAM2BAM IPA port
+ *
+ * Each USB function that wants to use a BAM2BAM IPA port counts
+ * the number of IPA ports to use and initializes those ports at
+ * the time of bind_config() in the android gadget driver.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+int ipa_data_setup(enum ipa_func_type func)
+{
+ int ret;
+
+ pr_debug("requested %d IPA BAM port", func);
+
+ if (func >= USB_IPA_NUM_FUNCS) {
+ pr_err("Invalid num of ports count:%d\n", func);
+ return -EINVAL;
+ }
+
+ ret = ipa_data_port_alloc(func);
+ if (ret) {
+ pr_err("Failed to alloc port:%d\n", func);
+ return ret;
+ }
+
+ if (func == USB_IPA_FUNC_RNDIS) {
+ rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+ if (!rndis_data) {
+ pr_err("%s: fail allocate and initialize new instance\n",
+ __func__);
+ goto free_ipa_ports;
+ }
+ }
+ if (ipa_data_wq) {
+ pr_debug("ipa_data_wq is already setup.");
+ return 0;
+ }
+
+ ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!ipa_data_wq) {
+ pr_err("Failed to create workqueue\n");
+ ret = -ENOMEM;
+ goto free_rndis_data;
+ }
+
+ return 0;
+
+free_rndis_data:
+ if (func == USB_IPA_FUNC_RNDIS)
+ kfree(rndis_data);
+free_ipa_ports:
+ kfree(ipa_data_ports[func]);
+ ipa_data_ports[func] = NULL;
+
+ return ret;
+}
+
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->ul_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+
+ if (!max_transfer_size) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+ rndis_data->dl_max_transfer_size = max_transfer_size;
+ pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+ if (!max_packets_number) {
+ pr_err("%s: invalid parameters\n", __func__);
+ return;
+ }
+
+ rndis_data->ul_max_packets_number = max_packets_number;
+
+ if (max_packets_number > 1)
+ rndis_data->ul_aggregation_enable = true;
+ else
+ rndis_data->ul_aggregation_enable = false;
+
+ pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+ __func__, rndis_data->ul_aggregation_enable,
+ max_packets_number);
+}
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (atomic_read(&port->pipe_connect_notified)) {
+ pr_debug("%s: Transfers already started?\n", __func__);
+ return;
+ }
+ /*
+ * Increment usage count upon cable connect. Decrement after IPA
+ * handshake is done in disconnect work due to cable disconnect
+ * or in suspend work.
+ */
+ usb_gadget_autopm_get_noresume(port->gadget);
+ queue_work(ipa_data_wq, &port->connect_w);
+}
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+ struct ipa_data_ch_info *port;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ port = ipa_data_ports[func];
+ if (!port) {
+ pr_err("%s: port is NULL", __func__);
+ return;
+ }
+
+ if (!atomic_read(&port->pipe_connect_notified))
+ return;
+
+ rndis_ipa_reset_trigger();
+ ipa_data_stop_endless_xfer(port, true);
+ ipa_data_stop_endless_xfer(port, false);
+ spin_lock_irqsave(&port->port_lock, flags);
+ /* check if USB cable is disconnected or not */
+ if (port->port_usb) {
+ msm_ep_unconfig(port->port_usb->in);
+ msm_ep_unconfig(port->port_usb->out);
+ }
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 0000000..70d4293
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/gadget.h>
+#include <linux/cdev.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
+enum ipa_func_type {
+ USB_IPA_FUNC_ECM,
+ USB_IPA_FUNC_MBIM,
+ USB_IPA_FUNC_RMNET,
+ USB_IPA_FUNC_RNDIS,
+ USB_IPA_FUNC_DPL,
+ USB_IPA_NUM_FUNCS,
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
+
+struct gadget_ipa_port {
+ struct usb_composite_dev *cdev;
+ struct usb_function *func;
+ int rx_buffer_size;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ int ipa_consumer_ep;
+ int ipa_producer_ep;
+ const struct usb_endpoint_descriptor *in_ep_desc_backup;
+ const struct usb_endpoint_descriptor *out_ep_desc_backup;
+
+};
+
+struct ipa_function_bind_info {
+ struct usb_string *string_defs;
+ int data_str_idx;
+ struct usb_interface_descriptor *data_desc;
+ struct usb_endpoint_descriptor *fs_in_desc;
+ struct usb_endpoint_descriptor *fs_out_desc;
+ struct usb_endpoint_descriptor *fs_notify_desc;
+ struct usb_endpoint_descriptor *hs_in_desc;
+ struct usb_endpoint_descriptor *hs_out_desc;
+ struct usb_endpoint_descriptor *hs_notify_desc;
+ struct usb_endpoint_descriptor *ss_in_desc;
+ struct usb_endpoint_descriptor *ss_out_desc;
+ struct usb_endpoint_descriptor *ss_notify_desc;
+
+ struct usb_descriptor_header **fs_desc_hdr;
+ struct usb_descriptor_header **hs_desc_hdr;
+ struct usb_descriptor_header **ss_desc_hdr;
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN 40
+
+struct f_rndis_qc_opts {
+ struct usb_function_instance func_inst;
+ struct f_rndis_qc *rndis;
+ u32 vendor_id;
+ const char *manufacturer;
+ struct net_device *net;
+ int refcnt;
+};
+
+struct f_rmnet_opts {
+ struct usb_function_instance func_inst;
+ struct f_rmnet *dev;
+ int refcnt;
+};
+
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ u8 src_connection_idx, u8 dst_connection_idx);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+ bool remote_wakeup_enabled);
+
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
+#if IS_ENABLED(CONFIG_USB_CONFIGFS_RMNET_BAM)
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+ u32 ipa_prod, u32 ipa_cons);
+#else
+static inline void gqti_ctrl_update_ipa_pipes(void *gr,
+ enum qti_port_type qport,
+ u32 ipa_prod, u32 ipa_cons)
+{
+}
+#endif /* CONFIG_USB_CONFIGFS_RMNET_BAM */
+#endif
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index 06eecd1..b4353ac 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -47,6 +47,8 @@
int idx;
struct usb_qdss_bam_connect_info bam_info;
struct usb_gadget *gadget;
+ struct device *dev;
+ int ret;
pr_debug("set_qdss_data_connection\n");
@@ -57,6 +59,7 @@
gadget = qdss->gadget;
usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ dev = gadget->dev.parent;
bam_info = qdss->bam_info;
/* There is only one qdss pipe, so the pipe number can be set to 0 */
@@ -68,6 +71,23 @@
}
if (enable) {
+ ret = get_qdss_bam_info(usb_bam_type, idx,
+ &bam_info.qdss_bam_phys,
+ &bam_info.qdss_bam_size);
+ if (ret) {
+ pr_err("%s(): failed to get qdss bam info err(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ bam_info.qdss_bam_iova = dma_map_resource(dev->parent,
+ bam_info.qdss_bam_phys, bam_info.qdss_bam_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (!bam_info.qdss_bam_iova) {
+ pr_err("dma_map_resource failed\n");
+ return -ENOMEM;
+ }
+
usb_bam_alloc_fifos(usb_bam_type, idx);
bam_info.data_fifo =
kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
@@ -76,25 +96,34 @@
usb_bam_free_fifos(usb_bam_type, idx);
return -ENOMEM;
}
+
+ pr_debug("%s(): qdss_bam: iova:%lx p_addr:%lx size:%x\n",
+ __func__, bam_info.qdss_bam_iova,
+ (unsigned long)bam_info.qdss_bam_phys,
+ bam_info.qdss_bam_size);
+
get_bam2bam_connection_info(usb_bam_type, idx,
&bam_info.usb_bam_pipe_idx,
NULL, bam_info.data_fifo, NULL);
alloc_sps_req(qdss->port.data);
msm_data_fifo_config(qdss->port.data,
- bam_info.data_fifo->phys_base,
- bam_info.data_fifo->size,
- bam_info.usb_bam_pipe_idx);
+ bam_info.data_fifo->iova,
+ bam_info.data_fifo->size,
+ bam_info.usb_bam_pipe_idx);
init_data(qdss->port.data);
res = usb_bam_connect(usb_bam_type, idx,
- &(bam_info.usb_bam_pipe_idx));
+ &(bam_info.usb_bam_pipe_idx),
+ bam_info.qdss_bam_iova);
} else {
- kfree(bam_info.data_fifo);
res = usb_bam_disconnect_pipe(usb_bam_type, idx);
if (res)
pr_err("usb_bam_disconnection error\n");
+ dma_unmap_resource(dev->parent, bam_info.qdss_bam_iova,
+ bam_info.qdss_bam_size, DMA_BIDIRECTIONAL, 0);
usb_bam_free_fifos(usb_bam_type, idx);
+ kfree(bam_info.data_fifo);
}
return res;
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 0000000..0126932
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "f_qdss.h"
+
+struct rmnet_ctrl_pkt {
+ void *buf;
+ int len;
+ struct list_head list;
+};
+
+struct grmnet {
+ /* to usb host, aka laptop, windows pc etc. Will
+ * be filled by usb driver of rmnet functionality
+ */
+ int (*send_cpkt_response)(void *g, void *buf, size_t len);
+
+ /* to modem, and to be filled by driver implementing
+ * control function
+ */
+ int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+ void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+
+ void (*disconnect)(struct grmnet *g);
+ void (*connect)(struct grmnet *g);
+};
+
+enum ctrl_client {
+ FRMNET_CTRL_CLIENT,
+ GPS_CTRL_CLIENT,
+
+ NR_CTRL_CLIENTS
+};
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned int intf);
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport);
+int gqti_ctrl_init(void);
+void gqti_ctrl_cleanup(void);
+#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 588546a..c7596a7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -220,7 +220,15 @@
* 3. xhci_plat is grandchild of a pci device (dwc3-pci)
*/
sysdev = &pdev->dev;
- if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+ /*
+ * If sysdev->parent->parent is available and part of IOMMU group
+ * (indicating possible usage of SMMU enablement), then use
+ * sysdev->parent->parent as sysdev.
+ */
+ if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node &&
+ sysdev->parent->parent && sysdev->parent->parent->iommu_group)
+ sysdev = sysdev->parent->parent;
+ else if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
sysdev = sysdev->parent;
#ifdef CONFIG_PCI
else if (sysdev->parent && sysdev->parent->parent &&
@@ -316,7 +324,7 @@
if (device_property_read_u32(&pdev->dev, "xhci-imod-value", &imod))
imod = 0;
- if (device_property_read_u32(sysdev, "usb-core-id", &xhci->core_id))
+ if (device_property_read_u32(&pdev->dev, "usb-core-id", &xhci->core_id))
xhci->core_id = -EINVAL;
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index f7ff9e8f..9c33c6e 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -45,6 +45,8 @@
#define FREEZIO_N BIT(1)
#define POWER_DOWN BIT(0)
+#define QUSB2PHY_PORT_TEST_CTRL 0xB8
+
#define QUSB2PHY_PWR_CTRL1 0x210
#define PWR_CTRL1_CLAMP_N_EN BIT(1)
#define PWR_CTRL1_POWR_DOWN BIT(0)
@@ -68,10 +70,7 @@
#define QUSB2PHY_PORT_TUNE2 0x84
#define QUSB2PHY_PORT_TUNE3 0x88
#define QUSB2PHY_PORT_TUNE4 0x8C
-
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE 0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE 0x3
+#define QUSB2PHY_PORT_TUNE5 0x90
/* Get TUNE2's high nibble value read from efuse */
#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask) ((val >> pos) & mask)
@@ -98,21 +97,42 @@
#define QUSB2PHY_REFCLK_ENABLE BIT(0)
-unsigned int tune2;
-module_param(tune2, uint, S_IRUGO | S_IWUSR);
+static unsigned int tune1;
+module_param(tune1, uint, 0644);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
+static unsigned int tune2;
+module_param(tune2, uint, 0644);
MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+static unsigned int tune3;
+module_param(tune3, uint, 0644);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+static unsigned int tune4;
+module_param(tune4, uint, 0644);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+static unsigned int tune5;
+module_param(tune5, uint, 0644);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
void __iomem *tune2_efuse_reg;
void __iomem *ref_clk_base;
+ void __iomem *tcsr_clamp_dig_n;
struct clk *ref_clk_src;
struct clk *ref_clk;
struct clk *cfg_ahb_clk;
struct reset_control *phy_reset;
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct regulator *gdsc;
struct regulator *vdd;
struct regulator *vdda33;
struct regulator *vdda18;
@@ -124,6 +144,7 @@
u32 tune2_val;
int tune2_efuse_bit_pos;
int tune2_efuse_num_of_bits;
+ int tune2_efuse_correction;
bool power_enabled;
bool clocks_enabled;
@@ -145,6 +166,8 @@
int phy_pll_reset_seq_len;
int *emu_dcm_reset_seq;
int emu_dcm_reset_seq_len;
+ bool put_into_high_z_state;
+ struct mutex phy_lock;
};
static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -155,14 +178,22 @@
if (!qphy->clocks_enabled && on) {
clk_prepare_enable(qphy->ref_clk_src);
clk_prepare_enable(qphy->ref_clk);
+ clk_prepare_enable(qphy->iface_clk);
+ clk_prepare_enable(qphy->core_clk);
clk_prepare_enable(qphy->cfg_ahb_clk);
qphy->clocks_enabled = true;
}
if (qphy->clocks_enabled && !on) {
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+ /*
+	 * FSM dependency between iface_clk and core_clk.
+	 * Hence turn off core_clk before iface_clk.
+ */
+ clk_disable_unprepare(qphy->core_clk);
+ clk_disable_unprepare(qphy->iface_clk);
clk_disable_unprepare(qphy->ref_clk);
clk_disable_unprepare(qphy->ref_clk_src);
- clk_disable_unprepare(qphy->cfg_ahb_clk);
qphy->clocks_enabled = false;
}
@@ -170,6 +201,32 @@
qphy->clocks_enabled);
}
+static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qphy->gdsc))
+ return -EPERM;
+
+ if (on) {
+ dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n");
+ ret = regulator_enable(qphy->gdsc);
+ if (ret) {
+ dev_err(qphy->phy.dev, "unable to enable gdsc\n");
+ return ret;
+ }
+ } else {
+ dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n");
+ ret = regulator_disable(qphy->gdsc);
+ if (ret) {
+ dev_err(qphy->phy.dev, "unable to disable gdsc\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
{
int min, ret;
@@ -313,6 +370,7 @@
{
u8 num_of_bits;
u32 bit_mask = 1;
+ u8 reg_val;
pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
qphy->tune2_efuse_num_of_bits,
@@ -326,9 +384,8 @@
/*
* Read EFUSE register having TUNE2 parameter's high nibble.
- * If efuse register shows value as 0x0, then use default value
- * as 0xB as high nibble. Otherwise use efuse register based
- * value for this purpose.
+ * If efuse register shows value as 0x0, then use previous value
+ * as it is. Otherwise use efuse register based value for this purpose.
*/
qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
@@ -337,12 +394,24 @@
qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
qphy->tune2_efuse_bit_pos, bit_mask);
- if (!qphy->tune2_val)
- qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
+ /* Update higher nibble of TUNE2 value for better rise/fall times */
+ if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+ if (qphy->tune2_efuse_correction > 5 ||
+ qphy->tune2_efuse_correction < -10)
+ pr_warn("Correction value is out of range : %d\n",
+ qphy->tune2_efuse_correction);
+ else
+ qphy->tune2_val = qphy->tune2_val +
+ qphy->tune2_efuse_correction;
+ }
- /* Get TUNE2 byte value using high and low nibble value */
- qphy->tune2_val = ((qphy->tune2_val << 0x4) |
- TUNE2_DEFAULT_LOW_NIBBLE);
+ reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+ if (qphy->tune2_val) {
+ reg_val &= 0x0f;
+ reg_val |= (qphy->tune2_val << 4);
+ }
+
+ qphy->tune2_val = reg_val;
}
static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -450,7 +519,7 @@
* and try to read EFUSE value only once i.e. not every USB
* cable connect case.
*/
- if (qphy->tune2_efuse_reg) {
+ if (qphy->tune2_efuse_reg && !tune2) {
if (!qphy->tune2_val)
qusb_phy_get_tune2_param(qphy);
@@ -460,13 +529,29 @@
qphy->base + QUSB2PHY_PORT_TUNE2);
}
- /* If tune2 modparam set, override tune2 value */
- if (tune2) {
- pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
- __func__, tune2);
+ /* If tune modparam set, override tune value */
+
+ pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
+ __func__, tune1, tune2, tune3, tune4, tune5);
+ if (tune1)
+ writel_relaxed(tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+
+ if (tune2)
writel_relaxed(tune2,
qphy->base + QUSB2PHY_PORT_TUNE2);
- }
+
+ if (tune3)
+ writel_relaxed(tune3,
+ qphy->base + QUSB2PHY_PORT_TUNE3);
+
+ if (tune4)
+ writel_relaxed(tune4,
+ qphy->base + QUSB2PHY_PORT_TUNE4);
+
+ if (tune5)
+ writel_relaxed(tune5,
+ qphy->base + QUSB2PHY_PORT_TUNE5);
/* ensure above writes are completed before re-enabling PHY */
wmb();
@@ -596,27 +681,55 @@
writel_relaxed(intr_mask,
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+ if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+ /* enable phy auto-resume */
+ writel_relaxed(0x0C,
+ qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x04,
+ qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+ }
+
+
+ dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+ __func__, intr_mask);
+
+ /* Makes sure that above write goes through */
+ wmb();
+
qusb_phy_enable_clocks(qphy, false);
} else { /* Disconnect case */
+ mutex_lock(&qphy->phy_lock);
/* Disable all interrupts */
writel_relaxed(0x00,
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
- /*
- * Phy in non-driving mode leaves Dp and Dm lines in
- * high-Z state. Controller power collapse is not
- * switching phy to non-driving mode causing charger
- * detection failure. Bring phy to non-driving mode by
- * overriding controller output via UTMI interface.
- */
- writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
- OP_MODE_NON_DRIVE,
- qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
- writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
- qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+ /* Disable PHY */
+ writel_relaxed(POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ /* Make sure that above write is completed */
+ wmb();
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false);
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0,
+ qphy->tcsr_clamp_dig_n);
+ /* Do not disable power rails if there is vote for it */
+ if (!qphy->dpdm_enable)
+ qusb_phy_enable_power(qphy, false);
+ else
+ dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+ mutex_unlock(&qphy->phy_lock);
+
+ /*
+ * Set put_into_high_z_state to true so next USB
+ * cable connect, DPF_DMF request performs PHY
+ * reset and put it into high-z state. For bootup
+	 * with or without a USB cable, there is no need
+	 * to put the QUSB PHY into high-z state.
+ */
+ qphy->put_into_high_z_state = true;
}
qphy->suspended = true;
} else {
@@ -629,6 +742,9 @@
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
} else {
qusb_phy_enable_power(qphy, true);
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x1,
+ qphy->tcsr_clamp_dig_n);
qusb_phy_enable_clocks(qphy, true);
}
qphy->suspended = false;
@@ -669,15 +785,61 @@
dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
__func__, qphy->dpdm_enable);
+ mutex_lock(&qphy->phy_lock);
if (!qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, true);
if (ret < 0) {
dev_dbg(qphy->phy.dev,
"dpdm regulator enable failed:%d\n", ret);
+ mutex_unlock(&qphy->phy_lock);
return ret;
}
qphy->dpdm_enable = true;
+ if (qphy->put_into_high_z_state) {
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x1,
+ qphy->tcsr_clamp_dig_n);
+
+ qusb_phy_gdsc(qphy, true);
+ qusb_phy_enable_clocks(qphy, true);
+
+ dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n");
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "phyassert failed\n");
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "deassert failed\n");
+
+ /*
+ * Phy in non-driving mode leaves Dp and Dm
+ * lines in high-Z state. Controller power
+ * collapse is not switching phy to non-driving
+ * mode causing charger detection failure. Bring
+ * phy to non-driving mode by overriding
+ * controller output via UTMI interface.
+ */
+ writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+ OP_MODE_NON_DRIVE,
+ qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+ writel_relaxed(UTMI_ULPI_SEL |
+ UTMI_TEST_MUX_SEL,
+ qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+
+ /* Disable PHY */
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+ POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ /* Make sure that above write is completed */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+ qusb_phy_gdsc(qphy, false);
+ }
}
+ mutex_unlock(&qphy->phy_lock);
return ret;
}
@@ -690,19 +852,25 @@
dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
__func__, qphy->dpdm_enable);
+ mutex_lock(&qphy->phy_lock);
if (qphy->dpdm_enable) {
if (!qphy->cable_connected) {
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0,
+ qphy->tcsr_clamp_dig_n);
dev_dbg(qphy->phy.dev, "turn off for HVDCP case\n");
ret = qusb_phy_enable_power(qphy, false);
if (ret < 0) {
dev_dbg(qphy->phy.dev,
"dpdm regulator disable failed:%d\n",
ret);
+ mutex_unlock(&qphy->phy_lock);
return ret;
}
}
qphy->dpdm_enable = false;
}
+ mutex_unlock(&qphy->phy_lock);
return ret;
}
@@ -794,6 +962,9 @@
"qcom,tune2-efuse-num-bits",
&qphy->tune2_efuse_num_of_bits);
}
+ of_property_read_u32(dev->of_node,
+ "qcom,tune2-efuse-correction",
+ &qphy->tune2_efuse_correction);
if (ret) {
dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
@@ -829,6 +1000,17 @@
}
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "tcsr_clamp_dig_n_1p8");
+ if (res) {
+ qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+ res->start, resource_size(res));
+ if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
+ dev_err(dev, "err reading tcsr_clamp_dig_n\n");
+ qphy->tcsr_clamp_dig_n = NULL;
+ }
+ }
+
qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
if (IS_ERR(qphy->ref_clk_src))
dev_dbg(dev, "clk get failed for ref_clk_src\n");
@@ -847,6 +1029,34 @@
if (IS_ERR(qphy->phy_reset))
return PTR_ERR(qphy->phy_reset);
+ if (of_property_match_string(dev->of_node,
+ "clock-names", "iface_clk") >= 0) {
+ qphy->iface_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(qphy->iface_clk)) {
+ ret = PTR_ERR(qphy->iface_clk);
+ qphy->iface_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(dev, "couldn't get iface_clk(%d)\n", ret);
+ }
+ }
+
+ if (of_property_match_string(dev->of_node,
+ "clock-names", "core_clk") >= 0) {
+ qphy->core_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(qphy->core_clk)) {
+ ret = PTR_ERR(qphy->core_clk);
+ qphy->core_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(dev, "couldn't get core_clk(%d)\n", ret);
+ }
+ }
+
+ qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC");
+ if (IS_ERR(qphy->gdsc))
+ qphy->gdsc = NULL;
+
qphy->emulation = of_property_read_bool(dev->of_node,
"qcom,emulation");
@@ -981,6 +1191,7 @@
return PTR_ERR(qphy->vdda18);
}
+ mutex_init(&qphy->phy_lock);
platform_set_drvdata(pdev, qphy);
qphy->phy.label = "msm-qusb-phy";
@@ -1010,6 +1221,10 @@
if (ret)
usb_remove_phy(&qphy->phy);
+ /* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
return ret;
}
diff --git a/include/linux/input/qpnp-power-on.h b/include/linux/input/qpnp-power-on.h
index a2624ab..5944f0f 100644
--- a/include/linux/input/qpnp-power-on.h
+++ b/include/linux/input/qpnp-power-on.h
@@ -51,6 +51,7 @@
};
enum pon_restart_reason {
+ /* 0 ~ 31 for common defined features */
PON_RESTART_REASON_UNKNOWN = 0x00,
PON_RESTART_REASON_RECOVERY = 0x01,
PON_RESTART_REASON_BOOTLOADER = 0x02,
@@ -58,6 +59,10 @@
PON_RESTART_REASON_DMVERITY_CORRUPTED = 0x04,
PON_RESTART_REASON_DMVERITY_ENFORCE = 0x05,
PON_RESTART_REASON_KEYS_CLEAR = 0x06,
+
+	/* 32 ~ 63 for OEM/ODM specific features */
+ PON_RESTART_REASON_OEM_MIN = 0x20,
+ PON_RESTART_REASON_OEM_MAX = 0x3f,
};
#ifdef CONFIG_INPUT_QPNP_POWER_ON
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f56e98..b2eb2d0 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1120,12 +1120,12 @@
int msm_ep_config(struct usb_ep *ep);
int msm_ep_unconfig(struct usb_ep *ep);
void dwc3_tx_fifo_resize_request(struct usb_ep *ep, bool qdss_enable);
-int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr, u32 size,
+int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr, u32 size,
u8 dst_pipe_idx);
bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget);
int msm_dwc3_reset_dbm_ep(struct usb_ep *ep);
#else
-static inline int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+static inline int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr,
u32 size, u8 dst_pipe_idx)
{ return -ENODEV; }
diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h
index 1b0ca4a..84d7549 100644
--- a/include/linux/usb_bam.h
+++ b/include/linux/usb_bam.h
@@ -245,10 +245,13 @@
*
* @bam_pipe_idx - allocated pipe index.
*
+ * @iova - IPA address of USB peer BAM (i.e. QDSS BAM)
+ *
* @return 0 on success, negative value on error
*
*/
-int usb_bam_connect(enum usb_ctrl bam_type, int idx, u32 *bam_pipe_idx);
+int usb_bam_connect(enum usb_ctrl bam_type, int idx, u32 *bam_pipe_idx,
+ unsigned long iova);
/**
* Connect USB-to-IPA SPS connection.
@@ -430,12 +433,14 @@
/* Frees memory for data fifo and descriptor fifos. */
int usb_bam_free_fifos(enum usb_ctrl cur_bam, u8 idx);
-
+int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+ phys_addr_t *p_addr, u32 *bam_size);
bool msm_bam_hsic_lpm_ok(void);
bool msm_bam_hsic_host_pipe_empty(void);
bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
#else
-static inline int usb_bam_connect(enum usb_ctrl bam, u8 idx, u32 *bam_pipe_idx)
+/* Stub for !CONFIG builds; idx is int to match the real prototype above. */
+static inline int usb_bam_connect(enum usb_ctrl bam, int idx, u32 *bam_pipe_idx,
+	unsigned long iova)
{
	return -ENODEV;
}
@@ -529,6 +534,11 @@
return false;
}
+/* Header stub must be static inline (avoids defined-but-unused warnings in
+ * every includer) and should return an errno like its sibling stubs, not a
+ * bool constant.
+ */
+static inline int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+	phys_addr_t *p_addr, u32 *bam_size)
+{
+	return -ENODEV;
+}
static inline bool msm_bam_hsic_lpm_ok(void) { return true; }
static inline bool msm_bam_hsic_host_pipe_empty(void) { return true; }
static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)