Merge "soc: qcom: dcc: Add snapshot of qcom-dcc driver"
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 3b4436e..bdba526 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -11,6 +11,8 @@
  * "qcom,scm-msm8660" for MSM8660 platforms
  * "qcom,scm-msm8690" for MSM8690 platforms
  * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+ * "android,firmware" for firmware image
+ * "android,vbmeta" for setting system properties for verified boot.
 - clocks: One to three clocks may be required based on compatible.
  * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
  * Core, iface, and bus clocks required for "qcom,scm"
@@ -26,3 +28,26 @@
 			clock-names = "core", "bus", "iface";
 		};
 	};
+
+Example for SDM845:
+
+	firmware {
+		android {
+			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo";
+			};
+
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait,slotselect,avb";
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6405371..f8c8a69 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -160,6 +160,9 @@
    "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+   "tcsr_clamp_dig_n" : To enable/disable digital clamp to the phy. When
+   de-asserted, it will prevent random leakage from qusb2 phy resulting from
+   out of sequence turn on/off of 1p8, 3p3 and DVDD regulators.
    "refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -179,6 +182,8 @@
  - qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0
  - pinctrl-names/pinctrl-0/1: The GPIOs configured as output function. Names represents "active"
    state when attached in host mode and "suspend" state when detached.
+ - qcom,tune2-efuse-correction: The value to be adjusted from fused value for
+   improved rise/fall times.
 
 Example:
 	qusb_phy: qusb@f9b39000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a491bd7..a37e441 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,6 +24,7 @@
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 analogix	Analogix Semiconductor, Inc.
+android	Google
 apm	Applied Micro Circuits Corporation (APM)
 aptina	Aptina Imaging
 arasan	Arasan Chip Systems
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
index 7819d26..e6dc45a 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-bus.dtsi
@@ -23,6 +23,9 @@
 		reg-names = "mc_virt-base", "mem_noc-base",
 			"system_noc-base", "ipa_virt-base";
 
+		mbox-names = "apps_rsc";
+		mboxes = <&apps_rsc 0>;
+
 		/*RSCs*/
 		rsc_apps: rsc-apps {
 			cell-id = <MSM_BUS_RSC_APPS>;
@@ -360,7 +363,7 @@
 			label = "mas-qhm-qpic";
 			qcom,buswidth = <4>;
 			qcom,agg-ports = <1>;
-			qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+			qcom,connections = <&slv_qns_aggre_noc>;
 			qcom,bus-dev = <&fab_system_noc>;
 			qcom,bcms = <&bcm_pn3>;
 		};
@@ -451,7 +454,7 @@
 			qcom,buswidth = <8>;
 			qcom,agg-ports = <1>;
 			qcom,qport = <1>;
-			qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+			qcom,connections = <&slv_qns_aggre_noc>;
 			qcom,bus-dev = <&fab_system_noc>;
 			qcom,bcms = <&bcm_ce>, <&bcm_pn5>;
 			qcom,ap-owned;
@@ -538,7 +541,7 @@
 			qcom,buswidth = <8>;
 			qcom,agg-ports = <1>;
 			qcom,qport = <8>;
-			qcom,connections = <&slv_qhs_aoss &slv_qns_aggre_noc>;
+			qcom,connections = <&slv_qns_aggre_noc>;
 			qcom,bus-dev = <&fab_system_noc>;
 			qcom,bcms = <&bcm_pn1>;
 			qcom,ap-owned;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
index 2148cc9..65467f9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
@@ -338,4 +338,39 @@
 			};
 		};
 	};
+
+	xo-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmxpoorwills_vadc 0x4c>;
+		thermal-governor = "user_space";
+	};
+
+	pa-therm1-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmxpoorwills_vadc 0x4d>;
+		thermal-governor = "user_space";
+	};
+
+	pa-therm2-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmxpoorwills_vadc 0x4e>;
+		thermal-governor = "user_space";
+	};
+
+	mdm-case-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmxpoorwills_vadc 0x4f>;
+		thermal-governor = "user_space";
+	};
+
+	ambient-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmxpoorwills_vadc 0x52>;
+		thermal-governor = "user_space";
+	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index def0e13..d9258d8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -12,6 +12,7 @@
  */
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
 
 &soc {
 	/* USB port for DWC3 controller */
@@ -47,6 +48,19 @@
 		resets = <&clock_gcc GCC_USB30_BCR>;
 		reset-names = "core_reset";
 
+		qcom,msm-bus,name = "usb";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <3>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+			<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 0>,
+			<MSM_BUS_MASTER_USB3
+				MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
+			<MSM_BUS_MASTER_USB3
+				MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 40000>;
+
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index ce38838..b6393a91 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -306,10 +306,10 @@
 		};
 };
 
-	restart@4ab000 {
+	restart@c264000 {
 		compatible = "qcom,pshold";
-		reg = <0x4ab000 0x4>,
-			<0x193d100 0x4>;
+		reg = <0x0c264000 0x4>,
+			<0x01fd3000 0x4>;
 		reg-names = "pshold-base", "tcsr-boot-misc-detect";
 	};
 
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index 944fe67..fd1cac3 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -358,6 +358,8 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index c744f9e..c126ccd 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -370,6 +370,8 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 46eb60b..d0568aa 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -313,6 +313,8 @@
 CONFIG_MSM_QMP=y
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_GLINK=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -329,6 +331,7 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PM=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 4761bc5..8ca4247 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -43,7 +43,6 @@
 	select CPU_V7
 	select HAVE_ARM_ARCH_TIMER
 	select MSM_CORTEX_A7
-	select COMMON_CLK_MSM
 	select PINCTRL
 	select QCOM_SCM if SMP
 	select MSM_JTAG_MM if CORESIGHT_ETM
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6531949..96f43d6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -124,6 +124,7 @@
 config ARCH_SDM845
 	bool "Enable Support for Qualcomm Technologies Inc. SDM845"
 	depends on ARCH_QCOM
+	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
@@ -133,6 +134,7 @@
 config ARCH_SDM670
 	bool "Enable Support for Qualcomm Technologies Inc. SDM670"
 	depends on ARCH_QCOM
+	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index d5dc94e..86c8836 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -39,6 +39,11 @@
 		qcom,dcs-cmd-by-left;
 		qcom,mdss-dsi-tx-eot-append;
 		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 
 		qcom,mdss-dsi-display-timings {
 			timing@0{
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index 6a4200d..66beead 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -33,6 +33,11 @@
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 
 		qcom,mdss-dsi-display-timings {
 			timing@0{
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 7705d01..1990b65 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -17,42 +17,42 @@
 	qcom,fastchg-current-ma = <3450>;
 	qcom,batt-id-kohm = <60>;
 	qcom,battery-beta = <3435>;
-	qcom,battery-type = "ascent_3450mah_averaged_masterslave_jul11th2017";
-	qcom,checksum = <0x7C33>;
+	qcom,battery-type = "ascent_3450mah_averaged_masterslave_oct30th2017";
+	qcom,checksum = <0xAAE2>;
 	qcom,gui-version = "PMI8998GUI - 2.0.0.58";
 	qcom,fg-profile-data = [
 		8F 1F 94 05
 		73 0A 4A 06
 		27 1D 21 EA
-		16 0A 3B 0C
+		16 0A 3A 0C
 		07 18 97 22
 		A5 3C EC 4A
 		5C 00 00 00
 		10 00 00 00
-		00 00 92 BC
-		CD BD 02 B4
+		00 00 43 C5
+		92 BC 89 BB
 		11 00 08 00
 		69 DA AD 07
 		4B FD 19 FA
-		1D 0C B0 0C
+		7E 01 49 13
 		EB F3 78 3B
 		24 06 09 20
 		27 00 14 00
 		7E 1F F2 05
-		19 0A 55 FD
-		6C 1D C6 ED
+		19 0A AB 06
+		6C 1D B9 07
 		1A 12 FF 1D
 		6F 18 EB 22
 		B9 45 6F 52
 		55 00 00 00
 		0E 00 00 00
-		00 00 A1 D5
-		34 BA A0 CA
+		00 00 33 CC
+		72 CA B3 C4
 		0F 00 00 00
 		93 00 AD 07
 		8D FD F6 00
-		BA 0D 5C 04
-		B3 FC F4 1B
+		6F E3 44 0B
+		AB FC F9 1B
 		C3 33 CC FF
 		07 10 00 00
 		A4 0D 99 45
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index a9ca87c..0a2b814 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -257,6 +257,229 @@
 		qcom,pipe-attr-ee;
 	};
 
+	thermal_zones: thermal-zones {
+		mdm-core-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 1>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		qdsp-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 2>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		camera-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&tsens0 3>;
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 4>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 5>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 6>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_cpu3-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 7>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc1_l2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 8>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 9>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 10>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 11>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_cpu3-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 12>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		apc0_l2-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 13>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		gpu0-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 14>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		gpu1-usr {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 15>;
+			thermal-governor = "user_space";
+			trips {
+				active-config0 {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+	};
+
+	tsens0: tsens@4a8000 {
+		compatible = "qcom,msm8953-tsens";
+		reg = <0x4a8000 0x1000>,
+			<0x4a9000 0x1000>;
+		reg-names = "tsens_srot_physical",
+					"tsens_tm_physical";
+		interrupts = <0 184 0>, <0 314 0>;
+		interrupt-names = "tsens-upper-lower", "tsens-critical";
+		#thermal-sensor-cells = <1>;
+	};
+
 	blsp1_uart0: serial@78af000 {
 		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 		reg = <0x78af000 0x200>;
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 60162e3..0ddb9f5 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -82,6 +82,12 @@
 			mpp@a100 {
 				reg = <0xa100 0x100>;
 				qcom,pin-num = <2>;
+				/* MPP2 - PA_THERM config */
+				qcom,mode = <4>; /* AIN input */
+				qcom,invert = <1>; /* Enable MPP */
+				qcom,ain-route = <1>; /* AMUX 6 */
+				qcom,master-en = <1>;
+				qcom,src-sel = <0>; /* Function constant */
 			};
 
 			mpp@a200 {
@@ -93,6 +99,12 @@
 			mpp@a300 {
 				reg = <0xa300 0x100>;
 				qcom,pin-num = <4>;
+				/* MPP4 - CASE_THERM config */
+				qcom,mode = <4>; /* AIN input */
+				qcom,invert = <1>; /* Enable MPP */
+				qcom,ain-route = <3>; /* AMUX 8 */
+				qcom,master-en = <1>;
+				qcom,src-sel = <0>; /* Function constant */
 			};
 		};
 
@@ -165,6 +177,28 @@
 			qcom,adc-vdd-reference = <1800>;
 			qcom,vadc-poll-eoc;
 
+			chan@5 {
+				label = "vcoin";
+				reg = <5>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@7 {
+				label = "vph_pwr";
+				reg = <7>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
 			chan@8 {
 				label = "die_temp";
 				reg = <8>;
@@ -208,6 +242,63 @@
 				qcom,hw-settle-time = <0>;
 				qcom,fast-avg-setup = <0>;
 			};
+
+			chan@36 {
+				label = "pa_therm0";
+				reg = <0x36>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@11 {
+				label = "pa_therm1";
+				reg = <0x11>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+
+			chan@32 {
+				label = "xo_therm";
+				reg = <0x32>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@3c {
+				label = "xo_therm_buf";
+				reg = <0x3c>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+			chan@13 {
+				label = "case_therm";
+				reg = <0x13>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
 		};
 
 		pm8953_adc_tm: vadc@3400 {
@@ -224,7 +315,6 @@
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
 			qcom,adc_tm-vadc = <&pm8953_vadc>;
-
 		};
 
 		pm8953_rtc: qcom,pm8953_rtc {
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 007081a..2f4b00e 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -103,6 +103,7 @@
 			qcom,thermal-mitigation
 					= <3000000 1500000 1000000 500000>;
 			qcom,auto-recharge-soc;
+			qcom,suspend-input-on-debug-batt;
 
 			qcom,chgr@1000 {
 				reg = <0x1000 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-mtp.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-mtp.dtsi
index b46cbfd..025d9a2 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-mtp.dtsi
@@ -10,6 +10,10 @@
  * GNU General Public License for more details.
  */
 
+#include "pm8005.dtsi"
+#include "sdm670-pmic-overlay.dtsi"
+#include "qcs605-pm660-pm8005-regulator.dtsi"
+
 / {
 	cpus {
 		/delete-node/ cpu@200;
@@ -111,3 +115,91 @@
 		};
 	};
 };
+
+&spmi_bus {
+	/delete-node/ qcom,pm660l@2;
+	/delete-node/ qcom,pm660l@3;
+};
+
+&thermal_zones {
+	pm660l_tz {
+		/delete-property/ thermal-sensors;
+	};
+};
+
+&soc {
+	qcom,turing@8300000 {
+		/delete-property/ vdd_cx-supply;
+	};
+
+	qcom,lpass@62400000 {
+		/delete-property/ vdd_cx-supply;
+	};
+};
+
+&clock_cpucc {
+	/delete-property/ vdd_l3_mx_ao-supply;
+	/delete-property/ vdd_pwrcl_mx_ao-supply;
+};
+
+&clock_gcc {
+	/delete-property/ vdd_cx-supply;
+	/delete-property/ vdd_cx_ao-supply;
+};
+
+&clock_videocc {
+	/delete-property/ vdd_cx-supply;
+};
+
+&clock_camcc {
+	/delete-property/ vdd_mx-supply;
+	/delete-property/ vdd_cx-supply;
+};
+
+&clock_dispcc {
+	/delete-property/ vdd_cx-supply;
+};
+
+&clock_gpucc {
+	/delete-property/ vdd_mx-supply;
+	/delete-property/ vdd_cx-supply;
+};
+
+&pil_modem {
+	/delete-property/ vdd_mx-supply;
+	/delete-property/ vdd_cx-supply;
+	/delete-property/ vdd_mss-supply;
+};
+
+&clock_gfx {
+	/delete-property/ vdd_gfx-supply;
+};
+
+&gpu_gx_gdsc {
+	/delete-property/ parent-supply;
+};
+
+&mdss_dsi_phy0 {
+	/delete-property/ vdda-0p9-supply;
+};
+
+&mdss_dsi_phy1 {
+	/delete-property/ vdda-0p9-supply;
+};
+
+&sde_dp {
+	/delete-property/ vdda-0p9-supply;
+};
+
+&qusb_phy0 {
+	/delete-property/ vdd-supply;
+	/delete-property/ vdda33-supply;
+};
+
+&usb_qmp_dp_phy {
+	/delete-property/ vdd-supply;
+};
+
+&pm660_pdphy {
+	/delete-property/ vdd-pdphy-supply;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
new file mode 100644
index 0000000..a881ec4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
@@ -0,0 +1,474 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+&soc {
+	/* Delete all regulators */
+	/delete-node/ rpmh-regulator-smpa4;
+	/delete-node/ rpmh-regulator-modemlvl;
+	/delete-node/ rpmh-regulator-smpa6;
+	/delete-node/ rpmh-regulator-mxlvl;
+	/delete-node/ rpmh-regulator-gfxlvl;
+	/delete-node/ rpmh-regulator-cxlvl;
+	/delete-node/ rpmh-regulator-ldoa1;
+	/delete-node/ rpmh-regulator-ldoa2;
+	/delete-node/ rpmh-regulator-ldoa3;
+	/delete-node/ rpmh-regulator-ldoa5;
+	/delete-node/ rpmh-regulator-ldoa6;
+	/delete-node/ rpmh-regulator-ldoa7;
+	/delete-node/ rpmh-regulator-ldoa8;
+	/delete-node/ rpmh-regulator-ldoa9;
+	/delete-node/ rpmh-regulator-ldoa10;
+	/delete-node/ rpmh-regulator-ldoa11;
+	/delete-node/ rpmh-regulator-ldoa12;
+	/delete-node/ rpmh-regulator-ldoa13;
+	/delete-node/ rpmh-regulator-ldoa14;
+	/delete-node/ rpmh-regulator-ldoa15;
+	/delete-node/ rpmh-regulator-ldoa16;
+	/delete-node/ rpmh-regulator-ldoa17;
+	/delete-node/ rpmh-regulator-ldoa19;
+	/delete-node/ rpmh-regulator-ldob1;
+	/delete-node/ rpmh-regulator-ldob2;
+	/delete-node/ rpmh-regulator-ldob3;
+	/delete-node/ rpmh-regulator-ldob4;
+	/delete-node/ rpmh-regulator-ldob5;
+	/delete-node/ rpmh-regulator-ldob6;
+	/delete-node/ rpmh-regulator-ldob7;
+	/delete-node/ rpmh-regulator-ldob8;
+	/delete-node/ rpmh-regulator-lcxlvl;
+	/delete-node/ rpmh-regulator-lmxlvl;
+	/delete-node/ rpmh-regulator-bobb1;
+
+	/* RPMh regulators */
+
+	/* pm660 S2 - VDD_MX supply */
+	rpmh-regulator-mxlvl {
+		compatible = "qcom,rpmh-arc-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "mx.lvl";
+		pm660_s2_level: regulator-pm660-s2 {
+			regulator-name = "pm660_s2_level";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+		};
+
+		pm660_s2_level_ao: regulator-pm660-s2-level-ao {
+			regulator-name = "pm660_s2_level_ao";
+			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+		};
+
+		mx_cdev: mx-cdev-lvl {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm660_s2_level>;
+			regulator-levels = <RPMH_REGULATOR_LEVEL_NOM
+					RPMH_REGULATOR_LEVEL_OFF>;
+			#cooling-cells = <2>;
+		};
+	};
+
+	rpmh-regulator-smpa4 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "smpa4";
+		pm660_s4: regulator-pm660-s4 {
+			regulator-name = "pm660_s4";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1640000>;
+			regulator-max-microvolt = <2040000>;
+			qcom,init-voltage = <1640000>;
+		};
+	};
+
+	/* pm8005 S1 + S4 - VDD_CX supply */
+	rpmh-regulator-cxlvl {
+		compatible = "qcom,rpmh-arc-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "cx.lvl";
+		pm8005_s1_level-parent-supply = <&pm660_s2_level>;
+		pm8005_s1_level_ao-parent-supply = <&pm660_s2_level_ao>;
+		pm8005_s1_level: regulator-pm8005-s1-level {
+			regulator-name = "pm8005_s1_level";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,min-dropout-voltage-level = <(-1)>;
+		};
+
+		pm8005_s1_level_ao: regulator-pm8005-s1-level-ao {
+			regulator-name = "pm8005_s1_level_ao";
+			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,min-dropout-voltage-level = <(-1)>;
+		};
+
+		cx_cdev: regulator-cdev {
+			compatible = "qcom,rpmh-reg-cdev";
+			mboxes = <&qmp_aop 0>;
+			qcom,reg-resource-name = "cx";
+			#cooling-cells = <2>;
+		};
+	};
+
+	rpmh-regulator-smpc2 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "smpc2";
+		pm8005_s2: regulator-pm8005-s2 {
+			regulator-name = "pm8005_s2";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1000000>;
+		};
+	};
+
+	/* pm8005 S3 - VDD_GFX supply */
+	rpmh-regulator-gfxlvl {
+		compatible = "qcom,rpmh-arc-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "gfx.lvl";
+		pm8005_s3_level: regulator-pm8005-s3 {
+			regulator-name = "pm8005_s3_level";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+			regulator-max-microvolt
+				= <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,init-voltage-level
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+		};
+	};
+
+	rpmh-regulator-ldoa1 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa1";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l1: regulator-pm660-l1 {
+			regulator-name = "pm660_l1";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <800000>;
+			qcom,init-voltage = <800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa2 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa2";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l2: regulator-pm660-l2 {
+			regulator-name = "pm660_l2";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1144000>;
+			regulator-max-microvolt = <1256000>;
+			qcom,init-voltage = <1144000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa3 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa3";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l3: regulator-pm660-l3 {
+			regulator-name = "pm660_l3";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1352000>;
+			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa5 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa5";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l5: regulator-pm660-l5 {
+			regulator-name = "pm660_l5";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1304000>;
+			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa6 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa6";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l6: regulator-pm660-l6 {
+			regulator-name = "pm660_l6";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <880000>;
+			regulator-max-microvolt = <880000>;
+			qcom,init-voltage = <880000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	/* pm660 L7 = VDD_LPI_CX supply */
+	rpmh-regulator-lcxlvl {
+		compatible = "qcom,rpmh-arc-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "lcx.lvl";
+		pm660_l7_level: regulator-pm660-l7-level {
+			regulator-name = "pm660_l7_level";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+		};
+	};
+
+	rpmh-regulator-ldoa8 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa8";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l8: regulator-pm660-l8 {
+			regulator-name = "pm660_l8";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1952000>;
+			qcom,init-voltage = <1696000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa9 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa9";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l9: regulator-pm660-l9 {
+			regulator-name = "pm660_l9";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1616000>;
+			regulator-max-microvolt = <1984000>;
+			qcom,init-voltage = <1616000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa10 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa10";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l10: regulator-pm660-l10 {
+			regulator-name = "pm660_l10";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1952000>;
+			qcom,init-voltage = <1696000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa11 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa11";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l11: regulator-pm660-l11 {
+			regulator-name = "pm660_l11";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1904000>;
+			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa12 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa12";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l12: regulator-pm660-l12 {
+			regulator-name = "pm660_l12";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1616000>;
+			regulator-max-microvolt = <1984000>;
+			qcom,init-voltage = <1616000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa13 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa13";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l13: regulator-pm660-l13 {
+			regulator-name = "pm660_l13";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1904000>;
+			qcom,init-voltage = <1696000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa14 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa14";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l14: regulator-pm660-l14 {
+			regulator-name = "pm660_l14";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1696000>;
+			regulator-max-microvolt = <1904000>;
+			qcom,init-voltage = <1696000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa15 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa15";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l15: regulator-pm660-l15 {
+			regulator-name = "pm660_l15";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <2896000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,init-voltage = <2896000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa16 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa16";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l16: regulator-pm660-l16 {
+			regulator-name = "pm660_l16";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <2896000>;
+			regulator-max-microvolt = <3104000>;
+			qcom,init-voltage = <2896000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa17 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa17";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l17: regulator-pm660-l17 {
+			regulator-name = "pm660_l17";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <2920000>;
+			regulator-max-microvolt = <3232000>;
+			qcom,init-voltage = <2920000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa18 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa18";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l18: regulator-pm660-l18 {
+			regulator-name = "pm660_l18";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+
+	rpmh-regulator-ldoa19 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		mboxes = <&apps_rsc 0>;
+		qcom,resource-name = "ldoa19";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 1>;
+		pm660_l19: regulator-pm660-l19 {
+			regulator-name = "pm660_l19";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <2944000>;
+			regulator-max-microvolt = <3304000>;
+			qcom,init-voltage = <2944000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
index 6506f98..7ab99a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -229,12 +229,13 @@
 		cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
 		cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
 		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk";
+			"cam_clk", "cam_vaf";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -266,12 +267,13 @@
 		cam_vana-supply = <&cam_avdd_gpio_regulator>;
 		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk";
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1200000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1200000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -303,12 +305,13 @@
 		cam_vana-supply = <&cam_avdd_gpio_regulator>;
 		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
 		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk";
+			"cam_clk", "cam_vaf";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1800000 2850000 1200000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1200000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
@@ -350,9 +353,9 @@
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1800000 2850000 1200000 0>;
+		rgltr-max-voltage = <1800000 2850000 1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -390,12 +393,12 @@
 		cam_vana-supply = <&cam_avdd_gpio_regulator>;
 		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
 		cam_clk-supply = <&titan_top_gdsc>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1200000 1800000 2850000 0>;
+		rgltr-max-voltage = <1200000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -435,9 +438,9 @@
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 0 0 0>;
-		rgltr-max-voltage = <0 0 0 0>;
-		rgltr-load-current = <0 0 0 0>;
+		rgltr-min-voltage = <1800000 2850000 1200000 0>;
+		rgltr-max-voltage = <1800000 2850000 1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
index c64ed2c..c76fbce 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
@@ -26,7 +26,7 @@
 
 			config {
 				pins = "gpio18";
-				drive-strength = <2>;
+				drive-strength = <4>;
 				output-low;
 			};
 		};
@@ -53,7 +53,7 @@
 
 			config {
 				pins = "gpio19";
-				drive-strength = <2>;
+				drive-strength = <4>;
 				output-low;
 			};
 		};
@@ -80,7 +80,7 @@
 
 			config {
 				pins = "gpio21";
-				drive-strength = <2>;
+				drive-strength = <4>;
 				output-low;
 			};
 		};
@@ -107,7 +107,7 @@
 
 			config {
 				pins = "gpio23", "gpio25";
-				drive-strength = <2>;
+				drive-strength = <4>;
 				output-low;
 			};
 		};
@@ -159,7 +159,7 @@
 
 			config {
 				pins = "gpio22";
-				drive-strength = <2>;
+				drive-strength = <4>;
 			};
 		};
 
@@ -184,7 +184,7 @@
 
 			config {
 				pins = "gpio24";
-				drive-strength = <2>;
+				drive-strength = <4>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index dd35a36..fe88aae 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -186,12 +186,4 @@
 		reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
-
-	pdc: interrupt-controller@b220000{
-		compatible = "qcom,pdc-sdm670";
-		reg = <0xb220000 0x400>;
-		#interrupt-cells = <3>;
-		interrupt-parent = <&intc>;
-		interrupt-controller;
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index f3c6b00..ea6e1c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -78,6 +78,8 @@
 };
 
 &pm660_charger {
+	qcom,thermal-mitigation = <4200000 3500000 3000000 2500000
+				2000000 1500000 1000000 500000>;
 	qcom,battery-data = <&qrd_batterydata>;
 	qcom,sw-jeita-enable;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index c388f4a..0fdc303 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -245,6 +245,9 @@
 		interrupts = <GIC_SPI 601 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 0 1 64 0>,
+			<&gpi_dma0 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -264,6 +267,9 @@
 		interrupts = <GIC_SPI 602 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 1 1 64 0>,
+			<&gpi_dma0 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -283,6 +289,9 @@
 		interrupts = <GIC_SPI 603 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 2 1 64 0>,
+			<&gpi_dma0 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -302,6 +311,9 @@
 		interrupts = <GIC_SPI 604 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 3 1 64 0>,
+			<&gpi_dma0 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -321,6 +333,9 @@
 		interrupts = <GIC_SPI 605 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 4 1 64 0>,
+			<&gpi_dma0 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -340,6 +355,9 @@
 		interrupts = <GIC_SPI 606 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 5 1 64 0>,
+			<&gpi_dma0 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -359,6 +377,9 @@
 		interrupts = <GIC_SPI 607 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 6 1 64 0>,
+			<&gpi_dma0 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -378,6 +399,9 @@
 		interrupts = <GIC_SPI 608 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 7 1 64 0>,
+			<&gpi_dma0 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -626,6 +650,9 @@
 		interrupts = <GIC_SPI 353 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 0 1 64 0>,
+			<&gpi_dma1 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -645,6 +672,9 @@
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 1 1 64 0>,
+			<&gpi_dma1 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -664,6 +694,9 @@
 		interrupts = <GIC_SPI 355 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 2 1 64 0>,
+			<&gpi_dma1 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -683,6 +716,9 @@
 		interrupts = <GIC_SPI 356 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 3 1 64 0>,
+			<&gpi_dma1 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -702,6 +738,9 @@
 		interrupts = <GIC_SPI 357 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 4 1 64 0>,
+			<&gpi_dma1 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -721,6 +760,9 @@
 		interrupts = <GIC_SPI 358 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 5 1 64 0>,
+			<&gpi_dma1 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -740,6 +782,9 @@
 		interrupts = <GIC_SPI 359 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 6 1 64 0>,
+			<&gpi_dma1 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -759,6 +804,9 @@
 		interrupts = <GIC_SPI 360 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 7 1 64 0>,
+			<&gpi_dma1 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 0502312..067404b0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -354,7 +354,7 @@
 				1324800    84
 				1516800    96
 				1612800   114
-				1708000   139
+				1708800   139
 			>;
 			idle-cost-data = <
 				12 10 8 6
@@ -395,7 +395,7 @@
 				1324800   13
 				1516800   15
 				1612800   16
-				1708000   19
+				1708800   19
 			>;
 			idle-cost-data = <
 				4 3 2 1
@@ -464,10 +464,22 @@
 		#size-cells = <2>;
 		ranges;
 
-		removed_regions: removed_regions@85700000 {
+		hyp_region: hyp_region@85700000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x85700000 0 0x3800000>;
+			reg = <0 0x85700000 0 0x600000>;
+		};
+
+		xbl_region: xbl_region@85e00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85e00000 0 0x100000>;
+		};
+
+		removed_region: removed_region@85fc0000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85fc0000 0 0x2f40000>;
 		};
 
 		pil_camera_mem: camera_region@8ab00000 {
@@ -689,6 +701,14 @@
 		interrupt-parent = <&intc>;
 	};
 
+	pdc: interrupt-controller@b220000 {
+		compatible = "qcom,pdc-sdm670";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 1 0xf08>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
index f9c6f65..c6622d4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
@@ -62,7 +62,3 @@
 &dsi_sharp_4k_dsc_video_display {
 	qcom,dsi-display-active;
 };
-
-&mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &sde_dp>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index cf7ccae..3ce5626 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -751,38 +751,6 @@
 			qcom,bcms = <&bcm_cn0>;
 		};
 
-		mas_qhm_tic: mas-qhm-tic {
-			cell-id = <MSM_BUS_MASTER_TIC>;
-			label = "mas-qhm-tic";
-			qcom,buswidth = <4>;
-			qcom,agg-ports = <1>;
-			qcom,connections = <&slv_qhs_tlmm_south
-				&slv_qhs_spss_cfg &slv_qhs_camera_cfg
-				 &slv_qhs_sdc4 &slv_qhs_sdc2
-				 &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
-				 &slv_qhs_snoc_cfg &slv_qhs_glm
-				 &slv_qhs_pdm &slv_qhs_a2_noc_cfg
-				 &slv_qhs_qdss_cfg &slv_qhs_display_cfg
-				 &slv_qhs_tcsr &slv_qhs_dcc_cfg
-				 &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
-				 &slv_qhs_phy_refgen_south
-				 &slv_qhs_pcie_gen3_cfg
-				 &slv_qhs_pcie0_cfg &slv_qhs_gpuss_cfg
-				 &slv_qhs_venus_cfg &slv_qhs_tsif
-				 &slv_qhs_compute_dsp_cfg &slv_qhs_aop
-				 &slv_qhs_qupv3_north &slv_qhs_usb3_0
-				 &slv_srvc_cnoc &slv_qhs_ufs_card_cfg
-				 &slv_qhs_usb3_1 &slv_qhs_ipa
-				 &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
-				 &slv_qhs_aoss &slv_qhs_prng
-				 &slv_qhs_vsense_ctrl_cfg &slv_qhs_qupv3_south
-				 &slv_qhs_spdm &slv_qhs_crypto0_cfg
-				 &slv_qhs_pimem_cfg &slv_qhs_tlmm_north
-				 &slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
-			qcom,bus-dev = <&fab_config_noc>;
-			qcom,bcms = <&bcm_cn0>;
-		};
-
 		mas_qnm_snoc: mas-qnm-snoc {
 			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
 			label = "mas-qnm-snoc";
@@ -814,38 +782,6 @@
 			qcom,bcms = <&bcm_cn0>;
 		};
 
-		mas_xm_qdss_dap: mas-xm-qdss-dap {
-			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
-			label = "mas-xm-qdss-dap";
-			qcom,buswidth = <8>;
-			qcom,agg-ports = <1>;
-			qcom,connections = <&slv_qhs_tlmm_south
-				 &slv_qhs_spss_cfg &slv_qhs_camera_cfg
-				 &slv_qhs_sdc4 &slv_qhs_sdc2
-				 &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
-				 &slv_qhs_snoc_cfg &slv_qhs_glm
-				 &slv_qhs_pdm &slv_qhs_a2_noc_cfg
-				 &slv_qhs_qdss_cfg &slv_qhs_display_cfg
-				 &slv_qhs_tcsr &slv_qhs_dcc_cfg
-				 &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
-				 &slv_qhs_phy_refgen_south
-				 &slv_qhs_pcie_gen3_cfg
-				 &slv_qhs_pcie0_cfg &slv_qhs_gpuss_cfg
-				 &slv_qhs_venus_cfg &slv_qhs_tsif
-				 &slv_qhs_compute_dsp_cfg &slv_qhs_aop
-				 &slv_qhs_qupv3_north &slv_qhs_usb3_0
-				 &slv_srvc_cnoc &slv_qhs_ufs_card_cfg
-				 &slv_qhs_usb3_1 &slv_qhs_ipa
-				 &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
-				 &slv_qhs_aoss &slv_qhs_prng
-				 &slv_qhs_vsense_ctrl_cfg &slv_qhs_qupv3_south
-				 &slv_qhs_spdm &slv_qhs_crypto0_cfg
-				 &slv_qhs_pimem_cfg &slv_qhs_tlmm_north
-				 &slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
-			qcom,bus-dev = <&fab_config_noc>;
-			qcom,bcms = <&bcm_cn0>;
-		};
-
 		mas_qhm_cnoc: mas-qhm-cnoc {
 			cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
 			label = "mas-qhm-cnoc";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 31cfdd6..f31b3a5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -97,6 +97,10 @@
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
 		status = "ok";
+		shared-gpios = <8>;
+		pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+		pinctrl-0 = <&cam_res_mgr_active>;
+		pinctrl-1 = <&cam_res_mgr_suspend>;
 	};
 
 	actuator_rear: qcom,actuator@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index d7f25977..a3a48af 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -97,6 +97,10 @@
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
 		status = "ok";
+		shared-gpios = <8>;
+		pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+		pinctrl-0 = <&cam_res_mgr_active>;
+		pinctrl-1 = <&cam_res_mgr_suspend>;
 	};
 
 	actuator_rear: qcom,actuator@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 829dfcc..7d83184 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -33,6 +33,12 @@
 			qcom,ion-heap-type = "DMA";
 		};
 
+		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+			reg = <19>;
+			memory-region = <&qseecom_ta_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
 		qcom,ion-heap@13 { /* SECURE SPSS HEAP */
 			reg = <13>;
 			memory-region = <&secure_sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 244ac1d..191e76d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2985,12 +2985,12 @@
 		cam_sensor_front_active: cam_sensor_front_active {
 			/* RESET  AVDD_LDO*/
 			mux {
-				pins = "gpio28", "gpio8";
+				pins = "gpio28";
 				function = "gpio";
 			};
 
 			config {
-				pins = "gpio28", "gpio8";
+				pins = "gpio28";
 				bias-disable; /* No PULL */
 				drive-strength = <2>; /* 2 MA */
 			};
@@ -3014,12 +3014,12 @@
 		cam_sensor_iris_active: cam_sensor_iris_active {
 			/* RESET  AVDD_LDO*/
 			mux {
-				pins = "gpio9", "gpio8";
+				pins = "gpio9";
 				function = "gpio";
 			};
 
 			config {
-				pins = "gpio9", "gpio8";
+				pins = "gpio9";
 				bias-disable; /* No PULL */
 				drive-strength = <2>; /* 2 MA */
 			};
@@ -3074,12 +3074,12 @@
 		cam_sensor_rear2_active: cam_sensor_rear2_active {
 			/* RESET, STANDBY */
 			mux {
-				pins = "gpio9","gpio8";
+				pins = "gpio9";
 				function = "gpio";
 			};
 
 			config {
-				pins = "gpio9","gpio8";
+				pins = "gpio9";
 				bias-disable; /* No PULL */
 				drive-strength = <2>; /* 2 MA */
 			};
@@ -3088,17 +3088,47 @@
 		cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
 			/* RESET, STANDBY */
 			mux {
-				pins = "gpio9","gpio8";
+				pins = "gpio9";
 				function = "gpio";
 			};
 			config {
-				pins = "gpio9","gpio8";
+				pins = "gpio9";
 				bias-pull-down; /* PULL DOWN */
 				drive-strength = <2>; /* 2 MA */
 				output-low;
 			};
 		};
 
+		cam_res_mgr_active: cam_res_mgr_active {
+			/* AVDD_LDO*/
+			mux {
+				pins = "gpio8";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio8";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_res_mgr_suspend: cam_res_mgr_suspend {
+			/* AVDD_LDO */
+			mux {
+				pins = "gpio8";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio8";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+				output-low;
+			};
+		};
+
+
 		trigout_a: trigout_a {
 			mux {
 				pins = "gpio90";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 8f1afe9..b24ef1d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,12 +139,4 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
-
-	pdc: interrupt-controller@0xb220000{
-		compatible = "qcom,pdc-sdm845";
-		reg = <0xb220000 0x400>;
-		#interrupt-cells = <3>;
-		interrupt-parent = <&intc>;
-		interrupt-controller;
-	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6dae069..3ee0138 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -51,6 +51,26 @@
 	status = "ok";
 };
 
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
 &qupv3_se3_i2c {
 	status = "ok";
 	nq@28 {
@@ -162,7 +182,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb>;
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
 };
 
 &dsi_nt35597_truly_dsc_cmd {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index da4d41c..9672b94 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -296,7 +296,7 @@
 		qcom,supported-modes =
 			<RPMH_REGULATOR_MODE_LDO_LPM
 			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l7: regulator-l7 {
 			regulator-name = "pm8998_l7";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -479,7 +479,7 @@
 		qcom,supported-modes =
 			<RPMH_REGULATOR_MODE_LDO_LPM
 			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l17: regulator-l17 {
 			regulator-name = "pm8998_l17";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -587,7 +587,7 @@
 		qcom,supported-modes =
 			<RPMH_REGULATOR_MODE_LDO_LPM
 			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -624,7 +624,7 @@
 		qcom,supported-modes =
 			<RPMH_REGULATOR_MODE_LDO_LPM
 			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index d12a954..dd4e0b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -510,11 +510,6 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
-	qcom,mdss-dsi-pan-enable-dynamic-fps;
-	qcom,mdss-dsi-pan-fps-update =
-		"dfps_immediate_porch_mode_vfp";
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -580,11 +575,6 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
-	qcom,mdss-dsi-pan-enable-dynamic-fps;
-	qcom,mdss-dsi-pan-fps-update =
-		"dfps_immediate_porch_mode_vfp";
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5b3178d..1ce9f1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -603,6 +603,14 @@
 			size = <0 0x1400000>;
 		};
 
+		qseecom_ta_mem: qseecom_ta_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1000000>;
+		};
+
 		secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
@@ -754,6 +762,14 @@
 		interrupt-parent = <&intc>;
 	};
 
+	pdc: interrupt-controller@b220000 {
+		compatible = "qcom,pdc-sdm845";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 1 0xf08>,
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 2539aa2..0e6e6f8 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -383,6 +383,8 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
@@ -413,7 +415,6 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 52f9976..a8634aa 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -395,6 +395,8 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
@@ -424,7 +426,6 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
-CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
diff --git a/drivers/clk/qcom/clk-cpu-a7.c b/drivers/clk/qcom/clk-cpu-a7.c
index c0cc00f8..3e8a75d 100644
--- a/drivers/clk/qcom/clk-cpu-a7.c
+++ b/drivers/clk/qcom/clk-cpu-a7.c
@@ -664,6 +664,11 @@
 	/* Put proxy vote for APSS PLL */
 	clk_prepare_enable(apcs_cpu_pll.clkr.hw.clk);
 
+	/* Reconfigure APSS RCG */
+	ret = clk_set_rate(apcs_clk.clkr.hw.clk, sys_apc0_aux_clk.rrate);
+	if (ret)
+		dev_err(&pdev->dev, "Unable to set aux rate on apcs_clk\n");
+
 	/* Set to TURBO boot frequency */
 	ret = clk_set_rate(apcs_clk.clkr.hw.clk, a7cc_clk_init_rate);
 	if (ret)
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
index a62a9a8..52a18ea 100644
--- a/drivers/clk/qcom/gcc-sdxpoorwills.c
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -1368,7 +1368,7 @@
 
 static struct clk_branch gcc_pcie_aux_clk = {
 	.halt_reg = 0x37020,
-	.halt_check = BRANCH_HALT_VOTED,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x6d00c,
 		.enable_mask = BIT(3),
@@ -1427,7 +1427,7 @@
 
 static struct clk_branch gcc_pcie_pipe_clk = {
 	.halt_reg = 0x37028,
-	.halt_check = BRANCH_HALT_VOTED,
+	.halt_check = BRANCH_HALT_DELAY,
 	.clkr = {
 		.enable_reg = 0x6d00c,
 		.enable_mask = BIT(4),
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 31e5b76..ba71ce8 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@
 	tristate "MSM DRM"
 	depends on DRM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
-	depends on OF && COMMON_CLK
+	depends on OF
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
@@ -39,6 +39,7 @@
 config DRM_MSM_HDMI
 	bool "Enable HDMI support in MSM DRM driver"
 	depends on DRM_MSM
+	depends on COMMON_CLK
 	default n
 	help
 	  Compile in support for HDMI driver in msm drm
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 2d76d13..79f2ec9 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -332,6 +332,7 @@
 	struct drm_dp_aux_msg helper_msg;
 	u32 const message_size = 0x10;
 	u32 const segment_address = 0x30;
+	u32 const edid_block_length = 0x80;
 	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
 	bool i2c_read = input_msg->request &
 		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
@@ -339,6 +340,15 @@
 	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
 		return;
 
+	/*
+	 * Sending the segment value and EDID offset will be performed
+	 * from the DRM upstream EDID driver for each block. Avoid
+	 * duplicate AUX transactions related to this while reading the
+	 * first 16 bytes of each block.
+	 */
+	if (!(aux->offset % edid_block_length))
+		goto end;
+
 	aux->read = false;
 	aux->cmd_busy = true;
 	aux->no_send_addr = true;
@@ -371,6 +381,7 @@
 	helper_msg.buffer = &aux->offset;
 	helper_msg.size = 1;
 	dp_aux_cmd_fifo_tx(aux, &helper_msg);
+end:
 	aux->offset += message_size;
 
 	if (aux->offset == 0x80 || aux->offset == 0x100)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 609ae52..5318a5f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1075,6 +1075,7 @@
 		for (cnt = 0; cnt < length; cnt++)
 			cmdbuf[dsi_ctrl->cmd_len + cnt] = buffer[cnt];
 
+		msm_gem_sync(dsi_ctrl->tx_cmd_buf);
 		dsi_ctrl->cmd_len += length;
 
 		if (!(msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
@@ -2222,6 +2223,8 @@
 	if (dsi_ctrl->hw.ops.clear_interrupt_status)
 		dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, 0x0);
 
+	SDE_EVT32_IRQ(dsi_ctrl->cell_index, status, errors);
+
 	/* handle DSI error recovery */
 	if (status & DSI_ERROR)
 		dsi_ctrl_handle_error_status(dsi_ctrl, errors);
@@ -2281,7 +2284,7 @@
  * @dsi_ctrl: Pointer to associated dsi_ctrl structure
  * Returns: Zero on success
  */
-static int dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
+static int _dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
 {
 	int irq_num, rc;
 
@@ -2475,8 +2478,6 @@
 		}
 	}
 
-	dsi_ctrl_setup_isr(dsi_ctrl);
-
 	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
 	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
 
@@ -2488,6 +2489,25 @@
 	return rc;
 }
 
+/**
+ * dsi_ctrl_isr_configure() - API to register/deregister dsi isr
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:		   variable to control register/deregister isr
+ */
+void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	if (!dsi_ctrl)
+		return;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	if (enable)
+		_dsi_ctrl_setup_isr(dsi_ctrl);
+	else
+		_dsi_ctrl_destroy_isr(dsi_ctrl);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
 int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl)
 {
 	if (!dsi_ctrl)
@@ -2563,8 +2583,6 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	_dsi_ctrl_destroy_isr(dsi_ctrl);
-
 	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
 	if (rc) {
 		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -3255,6 +3273,28 @@
 }
 
 /**
+ * dsi_ctrl_irq_update() - Put an irq vote to process DSI error
+ *				interrupts at any time.
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:		   variable to enable/disable irq
+ */
+void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	if (!dsi_ctrl)
+		return;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	if (enable)
+		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+					DSI_SINT_ERROR, NULL);
+	else
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_ERROR);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 80b91ca..8850df4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -703,4 +703,18 @@
 int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl, u32 cmd_len,
 		u32 *flags);
 
+/**
+ * dsi_ctrl_isr_configure() - API to register/deregister dsi isr
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:		   variable to control register/deregister isr
+ */
+void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_irq_update() - Put an irq vote to process DSI error
+ *				interrupts at any time.
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:		   variable to control enable/disable irq line
+ */
+void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable);
 #endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index d92a71d..985cb51 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2369,6 +2369,40 @@
 	return 0;
 }
 
+static void dsi_display_ctrl_isr_configure(struct dsi_display *display, bool en)
+{
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!display)
+		return;
+
+	for (i = 0; (i < display->ctrl_count) &&
+			(i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl)
+			continue;
+		dsi_ctrl_isr_configure(ctrl->ctrl, en);
+	}
+}
+
+static void dsi_display_ctrl_irq_update(struct dsi_display *display, bool en)
+{
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!display)
+		return;
+
+	for (i = 0; (i < display->ctrl_count) &&
+			(i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl)
+			continue;
+		dsi_ctrl_irq_update(ctrl->ctrl, en);
+	}
+}
+
 int dsi_pre_clkoff_cb(void *priv,
 			   enum dsi_clk_type clk,
 			   enum dsi_clk_state new_state)
@@ -2485,6 +2519,9 @@
 		 */
 		if (display->phy_idle_power_off || mmss_clamp)
 			dsi_display_phy_idle_on(display, mmss_clamp);
+
+		/* enable dsi to serve irqs */
+		dsi_display_ctrl_irq_update(display, true);
 	}
 	if (clk & DSI_LINK_CLK) {
 		if (display->ulps_enabled) {
@@ -2514,6 +2551,8 @@
 
 	if ((clk_type & DSI_CORE_CLK) &&
 	    (curr_state == DSI_CLK_OFF)) {
+		/* dsi will not be able to serve irqs from here */
+		dsi_display_ctrl_irq_update(display, false);
 
 		rc = dsi_display_phy_power_off(display);
 		if (rc)
@@ -4752,6 +4791,9 @@
 		}
 	}
 
+	/* Set up ctrl isr before enabling core clk */
+	dsi_display_ctrl_isr_configure(display, true);
+
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_ON);
 	if (rc) {
@@ -5280,6 +5322,9 @@
 		pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
 		       display->name, rc);
 
+	/* destroy dsi isr setup */
+	dsi_display_ctrl_isr_configure(display, false);
+
 	rc = dsi_panel_post_unprepare(display->panel);
 	if (rc)
 		pr_err("[%s] panel post-unprepare failed, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e5c3082..cc09256 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -727,6 +727,7 @@
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
+void msm_gem_sync(struct drm_gem_object *obj);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a015379..d9aad88 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -150,6 +150,24 @@
 	/* when we start tracking the pin count, then do something here */
 }
 
+void msm_gem_sync(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj;
+
+	if (!obj)
+		return;
+
+	msm_obj = to_msm_bo(obj);
+
+	/*
+	 * dma_sync_sg_for_device synchronises a single contiguous or
+	 * scatter/gather mapping for the CPU and device.
+	 */
+	dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl,
+		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+}
+
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 08e6f79..6859f6e 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -46,6 +46,12 @@
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
 	bool (*is_domain_secure)(struct msm_mmu *mmu);
+	int (*set_attribute)(struct msm_mmu *mmu,
+			enum iommu_attr attr, void *data);
+	int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
+			uint32_t dest_address, uint32_t size, int prot);
+	int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
+					uint32_t size);
 };
 
 struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 7c879651..211acce 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -113,6 +113,74 @@
 	dev_dbg(client->dev, "iommu domain detached\n");
 }
 
+static int msm_smmu_set_attribute(struct msm_mmu *mmu,
+		enum iommu_attr attr, void *data)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain) {
+		DRM_ERROR("Invalid domain ret:%d\n", ret);
+		return -EINVAL;
+	}
+
+	ret = iommu_domain_set_attr(domain, attr, data);
+	if (ret)
+		DRM_ERROR("set domain attribute failed:%d\n", ret);
+
+	return ret;
+}
+
+static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
+				uint32_t dest_address, uint32_t size)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain)
+		return -EINVAL;
+
+	ret = iommu_unmap(domain, dest_address, size);
+	if (ret != size)
+		pr_err("smmu unmap failed\n");
+
+	return 0;
+}
+
+static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
+		uint32_t dest_address, uint32_t size, int prot)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	struct iommu_domain *domain;
+	int ret = 0;
+
+	if (!client || !client->mmu_mapping)
+		return -ENODEV;
+
+	domain = client->mmu_mapping->domain;
+	if (!domain)
+		return -EINVAL;
+
+	ret = iommu_map(domain, dest_address, dest_address, size, prot);
+	if (ret)
+		pr_err("smmu map failed\n");
+
+	return ret;
+}
+
 static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
 		struct sg_table *sgt, int prot)
 {
@@ -299,6 +367,9 @@
 	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
 	.destroy = msm_smmu_destroy,
 	.is_domain_secure = msm_smmu_is_domain_secure,
+	.set_attribute = msm_smmu_set_attribute,
+	.one_to_one_map = msm_smmu_one_to_one_map,
+	.one_to_one_unmap = msm_smmu_one_to_one_unmap,
 };
 
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
@@ -444,9 +515,7 @@
 	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");
 
 	/* generate dump, but no panic */
-	SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
-			"dsi1_phy", "vbif", "dbg_bus",
-			"vbif_dbg_bus");
+	SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
 
 	/*
 	 * return -ENOSYS to allow smmu driver to dump out useful
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 3879d4d..0334ead 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -29,6 +29,8 @@
 
 #define SDE_PERF_MODE_STRING_SIZE	128
 
+static DEFINE_MUTEX(sde_core_perf_lock);
+
 /**
  * enum sde_perf_mode - performance tuning mode
  * @SDE_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -303,8 +305,7 @@
 }
 
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
-		struct drm_crtc *crtc, u32 bus_id,
-		struct sde_core_perf_params *crtc_perf)
+		struct drm_crtc *crtc, u32 bus_id)
 {
 	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
 	struct sde_core_perf_params perf = { { 0 } };
@@ -313,6 +314,7 @@
 	struct drm_crtc *tmp_crtc;
 	struct sde_crtc_state *sde_cstate;
 	struct msm_drm_private *priv = kms->dev->dev_private;
+	struct sde_crtc *sde_crtc;
 
 	u64 tmp_max_per_pipe_ib;
 	u64 tmp_bw_ctl;
@@ -322,18 +324,12 @@
 		    _is_crtc_client_type_matches(tmp_crtc, curr_client_type,
 								&kms->perf)) {
 
-			if (crtc->base.id == tmp_crtc->base.id) {
-				/* for current crtc use the cached values */
-				tmp_max_per_pipe_ib =
-					crtc_perf->max_per_pipe_ib[bus_id];
-				tmp_bw_ctl = crtc_perf->bw_ctl[bus_id];
-			} else {
-				sde_cstate = to_sde_crtc_state(tmp_crtc->state);
-				tmp_max_per_pipe_ib =
-				  sde_cstate->new_perf.max_per_pipe_ib[bus_id];
-				tmp_bw_ctl =
-				  sde_cstate->new_perf.bw_ctl[bus_id];
-			}
+			/* use current perf, which are the values voted */
+			sde_crtc = to_sde_crtc(tmp_crtc);
+			tmp_max_per_pipe_ib =
+			  sde_crtc->cur_perf.max_per_pipe_ib[bus_id];
+			tmp_bw_ctl =
+			  sde_crtc->cur_perf.bw_ctl[bus_id];
 
 			perf.max_per_pipe_ib[bus_id] =
 				max(perf.max_per_pipe_ib[bus_id],
@@ -468,30 +464,25 @@
 		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
 		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
 			sde_crtc->cur_perf.bw_ctl[i] = 0;
-			_sde_core_perf_crtc_update_bus(kms, crtc, i,
-				&sde_crtc->cur_perf);
+			_sde_core_perf_crtc_update_bus(kms, crtc, i);
 		}
 	}
 }
 
-static u64 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
-	struct sde_core_perf_params *crct_perf, struct drm_crtc *crtc)
+static u64 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
 {
 	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
 	struct drm_crtc *tmp_crtc;
-	struct sde_crtc_state *sde_cstate;
+	struct sde_crtc *sde_crtc;
 	u64 tmp_rate;
 
 	drm_for_each_crtc(tmp_crtc, kms->dev) {
 		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
 
-			if (crtc->base.id == tmp_crtc->base.id) {
-				/* for current CRTC, use the cached value */
-				tmp_rate = crct_perf->core_clk_rate;
-			} else {
-				sde_cstate = to_sde_crtc_state(tmp_crtc->state);
-				tmp_rate = sde_cstate->new_perf.core_clk_rate;
-			}
+			/* use current perf, which are the values voted */
+			sde_crtc = to_sde_crtc(tmp_crtc);
+			tmp_rate = sde_crtc->cur_perf.core_clk_rate;
+
 			clk_rate = max(tmp_rate, clk_rate);
 
 			clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
@@ -529,17 +520,14 @@
 		return;
 	}
 	priv = kms->dev->dev_private;
-
-	/* wake vote update is not required with display rsc */
-	if (kms->perf.bw_vote_mode == DISP_RSC_MODE && stop_req)
-		return;
-
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
 			crtc->base.id, stop_req, kms->perf.core_clk_rate);
 
+	mutex_lock(&sde_core_perf_lock);
+
 	/*
 	 * cache the performance numbers in the crtc prior to the
 	 * crtc kickoff, so the same numbers are used during the
@@ -641,7 +629,7 @@
 
 	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
 		if (update_bus & BIT(i))
-			_sde_core_perf_crtc_update_bus(kms, crtc, i, old);
+			_sde_core_perf_crtc_update_bus(kms, crtc, i);
 	}
 
 	/*
@@ -649,7 +637,7 @@
 	 * bandwidth is available before clock rate is increased.
 	 */
 	if (update_clk) {
-		clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
+		clk_rate = _sde_core_perf_get_core_clk_rate(kms);
 
 		SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
 			old->core_clk_rate, new->core_clk_rate);
@@ -658,12 +646,15 @@
 		if (ret) {
 			SDE_ERROR("failed to set %s clock rate %llu\n",
 					kms->perf.clk_name, clk_rate);
+			mutex_unlock(&sde_core_perf_lock);
 			return;
 		}
 
 		kms->perf.core_clk_rate = clk_rate;
 		SDE_DEBUG("update clk rate = %lld HZ\n", clk_rate);
 	}
+	mutex_unlock(&sde_core_perf_lock);
+
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 94c7f40..9cdef88 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -3501,6 +3501,9 @@
 	if (dump_status)
 		SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
 
+	/* optionally generate a panic instead of performing a h/w reset */
+	SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic");
+
 	for (i = 0; i < sde_crtc->num_mixers; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.reset)
@@ -3618,10 +3621,11 @@
 
 		/*
 		 * For inline ASYNC modes, the flush bits are not written
-		 * to hardware atomically, so avoid using it if a video
-		 * mode encoder is active on this CRTC.
+		 * to hardware atomically. This is not fully supported for
+		 * non-command mode encoders, so force SYNC mode if any
+		 * of them are attached to the CRTC.
 		 */
-		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) {
+		if (sde_encoder_get_intf_mode(encoder) != INTF_MODE_CMD) {
 			cstate->sbuf_cfg.rot_op_mode =
 				SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
 			return false;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index b6888df..e24c8c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -2140,7 +2140,7 @@
 		mutex_lock(&sde_enc->rc_lock);
 
 		if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
-			SDE_ERROR_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
 					sw_event, sde_enc->rc_state);
 			SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
 					SDE_EVTLOG_ERROR);
@@ -2768,18 +2768,35 @@
 	}
 }
 
+int sde_encoder_idle_request(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid drm encoder\n");
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	sde_encoder_resource_control(&sde_enc->base,
+						SDE_ENC_RC_EVENT_ENTER_IDLE);
+
+	return 0;
+}
+
 static void sde_encoder_off_work(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
 			struct sde_encoder_virt, delayed_off_work.work);
+	struct drm_encoder *drm_enc;
 
 	if (!sde_enc) {
 		SDE_ERROR("invalid sde encoder\n");
 		return;
 	}
+	drm_enc = &sde_enc->base;
 
-	sde_encoder_resource_control(&sde_enc->base,
-						SDE_ENC_RC_EVENT_ENTER_IDLE);
+	sde_encoder_idle_request(drm_enc);
 }
 
 /**
@@ -3436,16 +3453,21 @@
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
+	struct sde_kms *sde_kms = NULL;
+	struct msm_drm_private *priv = NULL;
 	bool needs_hw_reset = false;
 	uint32_t ln_cnt1, ln_cnt2;
 	unsigned int i;
 	int rc, ret = 0;
 
-	if (!drm_enc || !params) {
+	if (!drm_enc || !params || !drm_enc->dev ||
+		!drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid args\n");
 		return -EINVAL;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
@@ -3514,7 +3536,8 @@
 		}
 	}
 
-	if (_sde_encoder_is_dsc_enabled(drm_enc)) {
+	if (_sde_encoder_is_dsc_enabled(drm_enc) &&
+		!sde_kms->splash_data.cont_splash_en) {
 		rc = _sde_encoder_dsc_setup(sde_enc, params);
 		if (rc) {
 			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
@@ -4381,8 +4404,8 @@
 		return ret;
 	}
 
-	if (conn->encoder) {
-		conn->state->best_encoder = conn->encoder;
+	if (sde_conn->encoder) {
+		conn->state->best_encoder = sde_conn->encoder;
 		SDE_DEBUG_ENC(sde_enc,
 			"configured cstate->best_encoder to ID = %d\n",
 			conn->state->best_encoder->base.id);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index f8a3cf3..937bd18 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -155,6 +155,14 @@
 int sde_encoder_wait_for_event(struct drm_encoder *drm_encoder,
 						enum msm_event_wait event);
 
+/**
+ * sde_encoder_idle_request - request to enter idle immediately, avoiding the
+ *                            4 vsync cycle wait before turning off the clocks.
+ * @encoder:	encoder pointer
+ * Returns: 0 on success, errorcode otherwise
+ */
+int sde_encoder_idle_request(struct drm_encoder *drm_enc);
+
 /*
  * sde_encoder_get_intf_mode - get interface mode of the given encoder
  * @encoder: Pointer to drm encoder object
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index d7cbfbe..7ba9ec9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -891,7 +891,8 @@
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
 	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
-		SDE_ERROR("already enabled\n");
+		if (!phys_enc->sde_kms->splash_data.cont_splash_en)
+			SDE_ERROR("already enabled\n");
 		return;
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index aaf50f6..ad27b7f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -263,12 +263,13 @@
 	if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
 			!phys_enc->hw_ctl->ops.get_bitmask_intf ||
 			!phys_enc->hw_ctl->ops.update_pending_flush ||
-			!vid_enc->hw_intf->ops.setup_rot_start)
+			!vid_enc->hw_intf->ops.setup_rot_start ||
+			!phys_enc->sde_kms)
 		return;
 
 	timing = &vid_enc->timing_params;
 	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
-	if (vfp_fetch_lines && rot_fetch_lines) {
+	if (rot_fetch_lines) {
 		vert_total = get_vertical_total(timing);
 		horiz_total = get_horizontal_total(timing);
 		if (vert_total >= (vfp_fetch_lines + rot_fetch_lines)) {
@@ -277,6 +278,13 @@
 			    horiz_total + 1;
 			f.enable = 1;
 			f.fetch_start = rot_fetch_start_vsync_counter;
+		} else {
+			SDE_ERROR_VIDENC(vid_enc,
+				"vert_total %u rot_fetch_lines %u vfp_fetch_lines %u\n",
+				vert_total, rot_fetch_lines, vfp_fetch_lines);
+			SDE_EVT32(DRMID(phys_enc->parent), vert_total,
+				rot_fetch_lines, vfp_fetch_lines,
+				SDE_EVTLOG_ERROR);
 		}
 	}
 
@@ -290,14 +298,17 @@
 		rot_fetch_lines, vfp_fetch_lines,
 		rot_fetch_start_vsync_counter);
 
-	phys_enc->hw_ctl->ops.get_bitmask_intf(
-			phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
-	phys_enc->hw_ctl->ops.update_pending_flush(
-			phys_enc->hw_ctl, flush_mask);
+	if (!phys_enc->sde_kms->splash_data.cont_splash_en) {
+		phys_enc->hw_ctl->ops.get_bitmask_intf(
+				phys_enc->hw_ctl, &flush_mask,
+				vid_enc->hw_intf->idx);
+		phys_enc->hw_ctl->ops.update_pending_flush(
+				phys_enc->hw_ctl, flush_mask);
 
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+	}
 
 	vid_enc->rot_fetch = f;
 	vid_enc->rot_fetch_valid = true;
@@ -328,7 +339,8 @@
 	unsigned long lock_flags;
 	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
-	if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
 		SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
 		return;
 	}
@@ -358,6 +370,14 @@
 
 	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
 
+	vid_enc->timing_params = timing_params;
+
+	if (phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_DEBUG_VIDENC(vid_enc,
+			"skipping intf programming since cont splash is enabled\n");
+		return;
+	}
+
 	fmt = sde_get_sde_format(fmt_fourcc);
 	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
 
@@ -371,10 +391,7 @@
 			&timing_params, fmt);
 	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
 	programmable_fetch_config(phys_enc, &timing_params);
-
-	vid_enc->timing_params = timing_params;
 }
 
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -653,7 +670,8 @@
 	u32 flush_mask = 0;
 
 	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
-			!phys_enc->parent->dev->dev_private) {
+			!phys_enc->parent->dev->dev_private ||
+			!phys_enc->sde_kms) {
 		SDE_ERROR("invalid encoder/device\n");
 		return;
 	}
@@ -676,7 +694,9 @@
 	/* reset state variables until after first update */
 	vid_enc->rot_fetch_valid = false;
 
-	sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+	if (!phys_enc->sde_kms->splash_data.cont_splash_en)
+		sde_encoder_helper_split_config(phys_enc,
+						vid_enc->hw_intf->idx);
 
 	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
 
@@ -689,6 +709,17 @@
 		!sde_encoder_phys_vid_is_master(phys_enc))
 		goto skip_flush;
 
+	/*
+	 * skip flushing intf during cont. splash handoff since bootloader
+	 * has already enabled the hardware and is single buffered.
+	 */
+
+	if (phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_DEBUG_VIDENC(vid_enc,
+		"skipping intf flush bit set as cont. splash is enabled\n");
+		goto skip_flush;
+	}
+
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 4437987..b8c790f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -42,47 +42,6 @@
 
 #define SDE_REG_RESET_TIMEOUT_US        2000
 
-#define MDP_CTL_FLUSH(n) ((0x2000) + (0x200*n) + CTL_FLUSH)
-#define CTL_FLUSH_LM_BIT(n) (6 + n)
-#define CTL_TOP_LM_OFFSET(index, lm) (0x2000 + (0x200 * index) + (lm * 0x4))
-
-int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
-		void __iomem *mmio)
-{
-	int i, j;
-	u32 op_mode;
-
-	if (!data) {
-		pr_err("invalid splash data\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < data->ctl_top_cnt; i++) {
-		struct ctl_top *top = &data->top[i];
-		u8 ctl_id = data->ctl_ids[i] - CTL_0;
-		u32 regval = 0;
-
-		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));
-
-		/* Set border fill*/
-		regval |= CTL_MIXER_BORDER_OUT;
-
-		for (j = 0; j < top->ctl_lm_cnt; j++) {
-			u8 lm_id = top->lm[j].lm_id - LM_0;
-
-			writel_relaxed(regval,
-			mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));
-
-			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
-		}
-		op_mode |= CTL_FLUSH_MASK_CTL;
-
-		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
-	}
-	return 0;
-
-}
-
 static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index a9bd104..435fc21 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -277,15 +277,6 @@
 };
 
 /**
- * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
- * @data: pointer to sde splash data
- * @mmio: mapped register io address of MDP
- * @return: error code
- */
-int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
-		void __iomem *mmio);
-
-/**
  * sde_hw_ctl - convert base object sde_hw_base to container
  * @hw: Pointer to base hardware block
  * return: Pointer to hardware block container
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index cf65784..6ccf957 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -481,6 +481,7 @@
 	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
 	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);
 
+	msm_gem_sync(cfg->dma_buf->buf);
 	SET_UP_REG_DMA_REG(hw, reg_dma);
 	SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
 	SDE_REG_WRITE(&hw, reg_dma_clear_status_off,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index e7aa6ea..0d85c53 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -260,13 +260,13 @@
  */
 static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
 		const struct sde_format *fmt,
-		bool blend_enabled, u32 flags,
+		bool const_alpha_en, u32 flags,
 		enum sde_sspp_multirect_index rect_mode)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 chroma_samp, unpack, src_format;
 	u32 opmode = 0;
-	u32 fast_clear = 0;
+	u32 alpha_en_mask = 0;
 	u32 op_mode_off, unpack_pat_off, format_off;
 	u32 idx;
 
@@ -329,11 +329,10 @@
 			SDE_FETCH_CONFIG_RESET_VALUE |
 			ctx->mdp->highest_bank_bit << 18);
 		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
-			fast_clear = (fmt->alpha_enable && blend_enabled) ?
-				BIT(31) : 0;
+			alpha_en_mask = const_alpha_en ? BIT(31) : 0;
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-					fast_clear | (ctx->mdp->ubwc_swizzle) |
-					(ctx->mdp->highest_bank_bit << 4));
+				alpha_en_mask | (ctx->mdp->ubwc_swizzle) |
+				(ctx->mdp->highest_bank_bit << 4));
 		}
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index f4b362f..13fc636 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -526,6 +526,56 @@
 
 }
 
+static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+		struct sde_splash_data *data)
+{
+	int ret = 0;
+
+	if (!mmu || !data)
+		return -EINVAL;
+
+	ret = mmu->funcs->one_to_one_map(mmu, data->splash_base,
+				data->splash_base, data->splash_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	if (ret)
+		SDE_ERROR("Splash smmu map failed: %d\n", ret);
+
+	return ret;
+}
+
+static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms)
+{
+	struct sde_splash_data *data;
+	struct msm_mmu *mmu;
+	int rc = 0;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	data = &sde_kms->splash_data;
+	if (!data) {
+		SDE_ERROR("Invalid splash data\n");
+		return -EINVAL;
+	}
+
+	if (!sde_kms->aspace[0]) {
+		SDE_ERROR("aspace not found for sde kms node\n");
+		return -EINVAL;
+	}
+
+	mmu = sde_kms->aspace[0]->mmu;
+	if (!mmu) {
+		SDE_ERROR("mmu not found for aspace\n");
+		return -EINVAL;
+	}
+
+	if (mmu->funcs && mmu->funcs->one_to_one_unmap)
+		mmu->funcs->one_to_one_unmap(mmu, data->splash_base,
+				data->splash_size);
+
+	return rc;
+}
+
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
@@ -536,8 +586,6 @@
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	int i, rc = 0;
-	struct drm_plane *plane;
-	bool commit_no_planes = true;
 
 	if (!kms)
 		return;
@@ -566,28 +614,8 @@
 		}
 	}
 
-	if (sde_kms->splash_data.smmu_handoff_pending) {
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-			if (plane->state != NULL &&
-					plane->state->crtc != NULL)
-				commit_no_planes = false;
-	}
-
-	if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {
-
-		rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
-						sde_kms->mmio);
-		if (rc)
-			SDE_ERROR("pipe staging failed: %d\n", rc);
-
-		rc = _sde_kms_release_splash_buffer(
-				sde_kms->splash_data.splash_base,
-				sde_kms->splash_data.splash_size);
-		if (rc)
-			SDE_ERROR("release of splash memory failed %d\n", rc);
-
+	if (sde_kms->splash_data.smmu_handoff_pending)
 		sde_kms->splash_data.smmu_handoff_pending = false;
-	}
 
 	/*
 	 * NOTE: for secure use cases we want to apply the new HW
@@ -667,12 +695,28 @@
 	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
 
 	if (sde_kms->splash_data.cont_splash_en) {
+		/* Release splash resources now that the first frame update is done */
+		rc = _sde_kms_splash_smmu_unmap(sde_kms);
 		SDE_DEBUG("Disabling cont_splash feature\n");
 		sde_kms->splash_data.cont_splash_en = false;
 		sde_power_resource_enable(&priv->phandle,
 				sde_kms->core_client, false);
 		SDE_DEBUG("removing Vote for MDP Resources\n");
 	}
+
+	/*
+	 * Even when continuous splash is disabled, release the splash
+	 * memory reservation back to the system after the first frame.
+	 */
+	if (sde_kms->splash_data.splash_base) {
+		rc = _sde_kms_release_splash_buffer(
+				sde_kms->splash_data.splash_base,
+				sde_kms->splash_data.splash_size);
+		if (rc)
+			pr_err("Failed to release splash memory\n");
+		sde_kms->splash_data.splash_base = 0;
+		sde_kms->splash_data.splash_size = 0;
+	}
 }
 
 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
@@ -1562,6 +1606,9 @@
 				&priv->phandle, sde_kms->power_event);
 
 	_sde_kms_release_displays(sde_kms);
+	(void)_sde_kms_release_splash_buffer(
+				sde_kms->splash_data.splash_base,
+				sde_kms->splash_data.splash_size);
 
 	/* safe to call these more than once during shutdown */
 	_sde_debugfs_destroy(sde_kms);
@@ -2107,39 +2154,6 @@
 
 }
 
-static int _sde_kms_gen_drm_mode(struct sde_kms *sde_kms,
-				void *display,
-				struct drm_display_mode *drm_mode)
-{
-	struct dsi_display_mode *modes = NULL;
-	u32 count = 0;
-	int rc = 0;
-
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc) {
-		SDE_ERROR("failed to get num of modes, rc=%d\n", rc);
-		return rc;
-	}
-
-	SDE_DEBUG("num of modes = %d\n", count);
-
-	rc = dsi_display_get_modes(display, &modes);
-	if (rc) {
-		SDE_ERROR("failed to get modes, rc=%d\n", rc);
-		count = 0;
-		return rc;
-	}
-
-	/* TODO; currently consider modes[0] as the preferred mode */
-	dsi_convert_to_drm_mode(&modes[0], drm_mode);
-
-	SDE_DEBUG("hdisplay = %d, vdisplay = %d\n",
-		drm_mode->hdisplay, drm_mode->vdisplay);
-	drm_mode_set_name(drm_mode);
-	drm_mode_set_crtcinfo(drm_mode, 0);
-	return rc;
-}
-
 static int sde_kms_cont_splash_config(struct msm_kms *kms)
 {
 	void *display;
@@ -2152,6 +2166,9 @@
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct list_head *connector_list = NULL;
+	struct drm_connector *conn_iter = NULL;
+	struct drm_connector *connector = NULL;
 
 	if (!kms) {
 		SDE_ERROR("invalid kms\n");
@@ -2204,13 +2221,46 @@
 	crtc = encoder->crtc;
 	SDE_DEBUG("crtc id = %d\n", crtc->base.id);
 
-	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
-	drm_mode = drm_mode_create(encoder->dev);
-	if (!drm_mode) {
-		SDE_ERROR("drm_mode create failed\n");
+
+	mutex_lock(&dev->mode_config.mutex);
+	connector_list = &dev->mode_config.connector_list;
+	list_for_each_entry(conn_iter, connector_list, head) {
+		/*
+		 * SDE_KMS doesn't attach more than one encoder to
+		 * a DSI connector. So it is safe to check only with the
+		 * first encoder entry. Revisit this logic if we ever have
+		 * to support continuous splash for external displays in MST
+		 * configuration.
+		 */
+		if (conn_iter &&
+			conn_iter->encoder_ids[0] == encoder->base.id) {
+			connector = conn_iter;
+			break;
+		}
+	}
+
+	if (!connector) {
+		SDE_ERROR("connector not initialized\n");
+		mutex_unlock(&dev->mode_config.mutex);
 		return -EINVAL;
 	}
-	_sde_kms_gen_drm_mode(sde_kms, display, drm_mode);
+
+	if (connector->funcs->fill_modes) {
+		connector->funcs->fill_modes(connector,
+			dev->mode_config.max_width,
+			dev->mode_config.max_height);
+	} else {
+		SDE_ERROR("fill_modes api not defined\n");
+		mutex_unlock(&dev->mode_config.mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
+
+	/* currently consider modes[0] as the preferred mode */
+	drm_mode = list_first_entry(&connector->modes,
+					struct drm_display_mode, head);
 	SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
 			drm_mode->name, drm_mode->base.id,
 			drm_mode->type, drm_mode->flags);
@@ -2323,7 +2373,8 @@
 	if (num_crtcs == 0) {
 		DRM_DEBUG("all crtcs are already in the off state\n");
 		drm_atomic_state_free(state);
-		goto suspended;
+		sde_kms->suspend_block = true;
+		goto unlock;
 	}
 
 	/* commit the "disable all" state */
@@ -2334,9 +2385,24 @@
 		goto unlock;
 	}
 
-suspended:
 	sde_kms->suspend_block = true;
 
+	drm_for_each_connector(conn, ddev) {
+		uint64_t lp;
+
+		lp = sde_connector_get_lp(conn);
+		if (lp != SDE_MODE_DPMS_LP2)
+			continue;
+
+		ret = sde_encoder_wait_for_event(conn->encoder,
+						MSM_ENC_TX_COMPLETE);
+		if (ret && ret != -EWOULDBLOCK)
+			SDE_ERROR(
+				"[enc: %d] wait for commit done returned %d\n",
+				conn->encoder->base.id, ret);
+		else if (!ret)
+			sde_encoder_idle_request(conn->encoder);
+	}
 unlock:
 	if (ret == -EDEADLK) {
 		drm_modeset_backoff(&ctx);
@@ -2450,6 +2516,7 @@
 {
 	struct msm_mmu *mmu;
 	int i, ret;
+	int early_map = 1;
 
 	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
 		struct msm_gem_address_space *aspace;
@@ -2462,6 +2529,23 @@
 			continue;
 		}
 
+		/*
+		 * Before attaching SMMU, we need to honor continuous splash
+		 * use case where hardware tries to fetch buffer from physical
+		 * address. To facilitate this requirement we need to have a
+		 * one to one mapping on SMMU until we have our first frame.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_data.smmu_handoff_pending) {
+			ret = mmu->funcs->set_attribute(mmu,
+				DOMAIN_ATTR_EARLY_MAP,
+				&early_map);
+			if (ret) {
+				SDE_ERROR("failed to set map att: %d\n", ret);
+				goto fail;
+			}
+		}
+
 		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
 			mmu, "sde");
 		if (IS_ERR(aspace)) {
@@ -2480,10 +2564,37 @@
 			goto fail;
 		}
 		aspace->domain_attached = true;
+		early_map = 0;
+		/* Mapping splash memory block */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_data.smmu_handoff_pending) {
+			ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu,
+					&sde_kms->splash_data);
+			if (ret) {
+				SDE_ERROR("failed to map ret:%d\n", ret);
+				goto fail;
+			}
+			/*
+			 * Turning off early map after generating one to one
+			 * mapping for splash address space.
+			 */
+			ret = mmu->funcs->set_attribute(mmu,
+				DOMAIN_ATTR_EARLY_MAP,
+				&early_map);
+			if (ret) {
+				SDE_ERROR("failed to set map att ret:%d\n",
+									ret);
+				goto early_map_fail;
+			}
+		}
 	}
 
 	return 0;
+early_map_fail:
+	mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base,
+					sde_kms->splash_data.splash_size);
 fail:
+	mmu->funcs->destroy(mmu);
 	_sde_kms_mmu_destroy(sde_kms);
 
 	return ret;
@@ -2591,8 +2702,6 @@
 	pr_info("found continuous splash base address:%lx size:%x\n",
 						data->splash_base,
 						data->splash_size);
-	data->smmu_handoff_pending = true;
-
 	return ret;
 }
 
@@ -2602,7 +2711,6 @@
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	struct sde_rm *rm = NULL;
-	bool splash_mem_found = false;
 	int i, rc = -EINVAL;
 
 	if (!kms) {
@@ -2696,12 +2804,8 @@
 	}
 
 	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
-	if (rc) {
+	if (rc)
 		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
-		splash_mem_found = false;
-	} else {
-		splash_mem_found = true;
-	}
 
 	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
 		true);
@@ -2740,11 +2844,18 @@
 	 * Attempt continuous splash handoff only if reserved
 	 * splash memory is found.
 	 */
-	if (splash_mem_found)
+	if (sde_kms->splash_data.splash_base)
 		sde_rm_cont_splash_res_init(&sde_kms->rm,
 					&sde_kms->splash_data,
 					sde_kms->catalog);
 
+	/*
+	 * SMMU handoff is necessary for continuous splash enabled
+	 * scenario.
+	 */
+	if (sde_kms->splash_data.cont_splash_en)
+		sde_kms->splash_data.smmu_handoff_pending = true;
+
 	/* Initialize reg dma block which is a singleton */
 	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
 			sde_kms->dev);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 9f27286..f2f870f 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2810,6 +2810,7 @@
 	bool parallel_fetch_qualified = true;
 	enum sde_sspp_multirect_mode mode = SDE_SSPP_MULTIRECT_NONE;
 	const struct msm_format *msm_fmt;
+	bool const_alpha_enable = true;
 
 	for (i = 0; i < R_MAX; i++) {
 		drm_state[i] = i ? plane->r1 : plane->r0;
@@ -2877,6 +2878,10 @@
 		if (sde_plane[i]->is_virtual)
 			mode = sde_plane_get_property(pstate[i],
 					PLANE_PROP_MULTIRECT_MODE);
+
+		if (pstate[i]->const_alpha_en != const_alpha_enable)
+			const_alpha_enable = false;
+
 	}
 
 	buffer_lines = 2 * max_tile_height;
@@ -2936,8 +2941,10 @@
 		break;
 	}
 
-	for (i = 0; i < R_MAX; i++)
+	for (i = 0; i < R_MAX; i++) {
 		pstate[i]->multirect_mode = mode;
+		pstate[i]->const_alpha_en = const_alpha_enable;
+	}
 
 	if (mode == SDE_SSPP_MULTIRECT_NONE)
 		return -EINVAL;
@@ -3138,6 +3145,29 @@
 	return sde_vbif_halt_plane_xin(sde_kms, xin_id, clk_ctrl);
 }
 
+
+static inline int _sde_plane_power_enable(struct drm_plane *plane, bool enable)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane->dev || !plane->dev->dev_private) {
+		SDE_ERROR("invalid drm device\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	return sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+									enable);
+}
+
 static void sde_plane_cleanup_fb(struct drm_plane *plane,
 		struct drm_plane_state *old_state)
 {
@@ -3163,6 +3193,13 @@
 			       psde->pipe - SSPP_VIG0);
 
 		/* halt this plane now */
+		ret = _sde_plane_power_enable(plane, true);
+		if (ret) {
+			SDE_ERROR("power resource enable failed with %d", ret);
+			SDE_EVT32(ret);
+			return;
+		}
+
 		ret = _sde_plane_fetch_halt(plane);
 		if (ret) {
 			SDE_ERROR_PLANE(psde,
@@ -3171,6 +3208,7 @@
 			SDE_EVT32(DRMID(plane), psde->pipe - SSPP_VIG0,
 				       ret, SDE_EVTLOG_ERROR);
 		}
+		_sde_plane_power_enable(plane, false);
 	}
 
 	old_rstate = &old_pstate->rot;
@@ -3541,6 +3579,10 @@
 				pstate->excl_rect.w, pstate->excl_rect.h);
 	}
 
+	pstate->const_alpha_en = fmt->alpha_enable &&
+		(SDE_DRM_BLEND_OP_OPAQUE !=
+		 sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+
 modeset_update:
 	if (!ret)
 		_sde_plane_sspp_atomic_check_mode_changed(psde,
@@ -3638,7 +3680,6 @@
 	struct drm_framebuffer *fb;
 	struct sde_rect src, dst;
 	bool q16_data = true;
-	bool blend_enabled = true;
 	int idx;
 
 	if (!plane) {
@@ -3876,12 +3917,9 @@
 		if (rstate->out_rotation & DRM_REFLECT_Y)
 			src_flags |= SDE_SSPP_FLIP_UD;
 
-		blend_enabled = (SDE_DRM_BLEND_OP_OPAQUE !=
-			sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
-
 		/* update format */
 		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt,
-				blend_enabled, src_flags,
+				pstate->const_alpha_en, src_flags,
 				pstate->multirect_index);
 
 		if (psde->pipe_hw->ops.setup_cdp) {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index d1eb399..e8b621c 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -128,6 +128,7 @@
  * @dirty:	bitmask for which pipe h/w config functions need to be updated
  * @multirect_index: index of the rectangle of SSPP
  * @multirect_mode: parallel or time multiplex multirect mode
+ * @const_alpha_en: const alpha channel is enabled for this HW pipe
  * @pending:	whether the current update is still pending
  * @defer_prepare_fb:	indicate if prepare_fb call was deferred
  * @scaler3_cfg: configuration data for scaler3
@@ -146,6 +147,7 @@
 	uint32_t dirty;
 	uint32_t multirect_index;
 	uint32_t multirect_mode;
+	bool const_alpha_en;
 	bool pending;
 	bool defer_prepare_fb;
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index c34b198..5c72efa 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -71,7 +71,8 @@
 
 #define DBG_CTRL_STOP_FTRACE	BIT(0)
 #define DBG_CTRL_PANIC_UNDERRUN	BIT(1)
-#define DBG_CTRL_MAX			BIT(2)
+#define DBG_CTRL_RESET_HW_PANIC	BIT(2)
+#define DBG_CTRL_MAX			BIT(3)
 
 /**
  * struct sde_dbg_reg_offset - tracking for start and end of region
@@ -2709,7 +2710,6 @@
 	va_list args;
 	char *blk_name = NULL;
 
-
 	/* no debugfs controlled events are enabled, just return */
 	if (!sde_dbg_base.debugfs_ctrl)
 		return;
@@ -2738,8 +2738,16 @@
 			pr_debug("panic underrun\n");
 			panic("underrun");
 		}
+
+		if (!strcmp(blk_name, "reset_hw_panic") &&
+				sde_dbg_base.debugfs_ctrl &
+				DBG_CTRL_RESET_HW_PANIC) {
+			pr_debug("reset hw panic\n");
+			panic("reset_hw");
+		}
 	}
 
+	va_end(args);
 }
 
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 429ab01..ca4f0da 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -39,8 +39,11 @@
 /* this time is ~1ms - only wake tcs in any mode */
 #define RSC_BACKOFF_TIME_NS		 (SINGLE_TCS_EXECUTION_TIME + 100)
 
-/* this time is ~1ms - only wake TCS in mode-0 */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS	(SINGLE_TCS_EXECUTION_TIME + 100)
+/*
+ * This time is ~1ms - only wake TCS in mode-0.
+ * It must be greater than the backoff time.
+ */
+#define RSC_MODE_THRESHOLD_TIME_IN_NS	(RSC_BACKOFF_TIME_NS + 2700)
 
 /* this time is ~2ms - sleep+ wake TCS in mode-1 */
 #define RSC_TIME_SLOT_0_NS		((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5991cd5..89453b0 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -405,6 +405,7 @@
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
 #define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL   0x00010
+#define A6XX_RBBM_GPR0_CNTL              0x00018
 #define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f
 #define A6XX_RBBM_INT_CLEAR_CMD          0x00037
 #define A6XX_RBBM_INT_0_MASK             0x00038
@@ -809,7 +810,7 @@
 /* GBIF registers */
 #define A6XX_GBIF_HALT                    0x3c45
 #define A6XX_GBIF_HALT_ACK                0x3c46
-#define A6XX_GBIF_HALT_MASK               0x1
+#define A6XX_GBIF_HALT_MASK               0x2
 
 #define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
 #define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
@@ -982,37 +983,8 @@
 
 /* ISENSE registers */
 #define A6XX_GMU_ISENSE_CTRL			0x1F95D
-#define A6XX_GPU_CS_ENABLE_REG			0x23120
 #define A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL		0x1f95d
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3	0x22d78
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2	0x22d58
-#define A6XX_GPU_CS_A_SENSOR_CTRL_0		0x22d80
-#define A6XX_GPU_CS_A_SENSOR_CTRL_2		0x422da
-#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS	0x2301a
-#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1	0x23157
-#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS	0x2301a
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0	0x2301d
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2	0x2301f
-#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4	0x23021
-#define A6XX_GPU_CS_AMP_CALIBRATION_DONE	0x23165
-#define A6XX_GPU_CS_AMP_PERIOD_CTRL		0x2316d
-#define A6XX_GPU_CS_AMP_CALIBRATION_DONE	0x23165
-
-#define CS_PWR_ON_STATUS			(10)
-#define AMP_SW_WRM_TRIM_START			(24)
-#define AMP_TRIM_TIMER				(6)
-#define AMP_SW_TRIM_START			(0)
-#define SS_AMPTRIM_DONE				(11)
-#define AMP_OFFSET_CHECK_MIN_ERR		(1)
-#define AMP_OFFSET_CHECK_MAX_ERR		(2)
-#define AMP_OUT_OF_RANGE_ERR			(4)
-#define TRIM_CNT_VALUE				(1)
-#define RUNTIME_CNT_VALUE			(16)
-#define TRIM_ENABLE				(0)
-
-#define AMP_ERR			(BIT(AMP_OFFSET_CHECK_MIN_ERR) || \
-				BIT(AMP_OFFSET_CHECK_MAX_ERR) || \
-				BIT(AMP_OUT_OF_RANGE_ERR))
+#define A6XX_GPU_CS_ENABLE_REG			0x23120
 
 /* LM registers */
 #define A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD       0x1F94D
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 770cf3b..08cd06b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -347,7 +347,7 @@
 		.minor = 0,
 		.patchid = ANY_ID,
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
-			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM |
+			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
 			ADRENO_IOCOHERENT,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b77f6e1..0dd1921 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -226,6 +226,10 @@
 #define ADRENO_HWCG_CTRL    3
 #define ADRENO_THROTTLING_CTRL 4
 
+/* VBIF,  GBIF halt request and ack mask */
+#define GBIF_HALT_REQUEST       0x1E0
+#define VBIF_RESET_ACK_MASK     0x00f0
+#define VBIF_RESET_ACK_TIMEOUT  100
 
 /* number of throttle counters for DCVS adjustment */
 #define ADRENO_GPMU_THROTTLE_COUNTERS 4
@@ -683,6 +687,8 @@
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
 	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+	ADRENO_REG_RBBM_GPR0_CNTL,
+	ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 	ADRENO_REG_VBIF_VERSION,
@@ -1889,17 +1895,15 @@
  * @ack_reg: register offset to wait for acknowledge
  */
 static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
-	int ack_reg)
+	int ack_reg, unsigned int mask)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned long wait_for_vbif;
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
 	unsigned int val;
 	int ret = 0;
 
 	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	wait_for_vbif = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
 	while (1) {
 		adreno_readreg(adreno_dev, ack_reg,
 			&val);
@@ -1929,15 +1933,27 @@
 	int ret = 0;
 
 	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Halt GBIF GX first and then CX part.
+		 * Need to release CX Halt explicitly in case of SW_RESET.
+		 * GX Halt release will be taken care of by SW_RESET internally.
+		 */
+		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
+				GBIF_HALT_REQUEST);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+				VBIF_RESET_ACK_MASK);
+		if (ret)
+			return ret;
+
 		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
 		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_GBIF_HALT_ACK);
-		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0);
+				ADRENO_REG_GBIF_HALT_ACK, mask);
 	} else {
 		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 			mask);
 		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_VBIF_XIN_HALT_CTRL1);
+				ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
 		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	}
 	return ret;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 83dd3fb..09d6a10 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -43,6 +43,8 @@
 #define A6XX_GPU_CX_REG_BASE		0x509E000
 #define A6XX_GPU_CX_REG_SIZE		0x1000
 
+#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
+
 static int _load_gmu_firmware(struct kgsl_device *device);
 
 static const struct adreno_vbif_data a630_vbif[] = {
@@ -758,6 +760,38 @@
 
 	a6xx_preemption_start(adreno_dev);
 	a6xx_protect_init(adreno_dev);
+
+	/*
+	 * We start LM here because we want all the following to be up
+	 * 1. GX HS
+	 * 2. SPTPRAC
+	 * 3. HFI
+	 * At this point, we are guaranteed all.
+	 */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
+		int result;
+		struct gmu_device *gmu = &device->gmu;
+		struct device *dev = &gmu->pdev->dev;
+
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
+			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
+		kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
+
+		gmu->lm_config.lm_type = 1;
+		gmu->lm_config.lm_sensor_type = 1;
+		gmu->lm_config.throttle_config = 1;
+		gmu->lm_config.idle_throttle_en = 0;
+		gmu->lm_config.acd_en = 0;
+		gmu->bcl_config = 0;
+		gmu->lm_dcvs_level = 0;
+
+		result = hfi_send_lmconfig(gmu);
+		if (result)
+			dev_err(dev, "Failure enabling limits management (%d)\n",
+			result);
+	}
 }
 
 /*
@@ -1713,80 +1747,6 @@
 	return 0;
 }
 
-#define KMASK(start, n) (GENMASK((start + n), (start)))
-
-static void isense_cold_trimm(struct kgsl_device *device)
-{
-	unsigned int reg;
-	struct gmu_device *gmu = &device->gmu;
-
-	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 0);
-
-	kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3,
-		0x00000F8F);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2,
-		0x00705161);
-	udelay(10);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0x3);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_0, 0x10040a);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_2, 0x10040a);
-
-	kgsl_gmu_regread(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS, &reg);
-	if ((reg & BIT(CS_PWR_ON_STATUS)) != (1 << CS_PWR_ON_STATUS)) {
-		dev_err(&gmu->pdev->dev, "ERROR - ISENSE power-up\n");
-		return;
-	}
-
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
-		KMASK(AMP_TRIM_TIMER, 15), 70 << AMP_TRIM_TIMER);
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
-		KMASK(AMP_SW_TRIM_START, 1), 0 << AMP_SW_TRIM_START);
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
-		KMASK(AMP_SW_TRIM_START, 1), 1 << AMP_SW_TRIM_START);
-
-	if (timed_poll_check(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS,
-		BIT(SS_AMPTRIM_DONE), GMU_START_TIMEOUT,
-		BIT(SS_AMPTRIM_DONE))) {
-		dev_err(&gmu->pdev->dev, "ISENSE SS_AMPTRIM failure\n");
-		return;
-	}
-
-	kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0, &reg);
-	if (reg & AMP_ERR) {
-		kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0,
-			&reg);
-		dev_err(&gmu->pdev->dev,
-			"ISENSE ERROR:trimming GX 0x%08x\n", reg);
-		return;
-	}
-
-	kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2, &reg);
-	if (reg & AMP_ERR) {
-		kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2,
-			&reg);
-		dev_err(&gmu->pdev->dev,
-			"ISENSE ERROR:trimming SPTPRAC 0x%08x\n", reg);
-		return;
-	}
-
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
-		KMASK(TRIM_CNT_VALUE, 13), 20 << TRIM_CNT_VALUE);
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
-		KMASK(RUNTIME_CNT_VALUE, 9), 50 << RUNTIME_CNT_VALUE);
-
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
-		KMASK(TRIM_ENABLE, 1), 1 << TRIM_ENABLE);
-	udelay(4);
-	kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
-		KMASK(TRIM_ENABLE, 1), 0 << TRIM_ENABLE);
-	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
-
-}
-
-#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
 /*
  * a6xx_gmu_fw_start() - set up GMU and start FW
  * @device: Pointer to KGSL device
@@ -1867,13 +1827,6 @@
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_SFR_ADDR, chipid);
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
-		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
-			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
-		isense_cold_trimm(device);
-	}
-
 	/* Configure power control and bring the GMU out of reset */
 	a6xx_gmu_power_config(device);
 	ret = a6xx_gmu_start(device);
@@ -2096,8 +2049,7 @@
 	return _load_gmu_firmware(device);
 }
 
-#define VBIF_RESET_ACK_TIMEOUT	100
-#define VBIF_RESET_ACK_MASK	0x00f0
+#define GBIF_CX_HALT_MASK BIT(1)
 
 static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 {
@@ -2138,6 +2090,13 @@
 	if (!vbif_acked)
 		return -ETIMEDOUT;
 
+	/*
+	 * GBIF GX halt will be released automatically by sw_reset.
+	 * Release GBIF CX halt after sw_reset
+	 */
+	if (adreno_has_gbif(adreno_dev))
+		kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
+
 	a6xx_sptprac_enable(adreno_dev);
 
 	return 0;
@@ -2354,8 +2313,14 @@
 			udelay(100);
 		}
 
-		if (acked)
-			ret = adreno_soft_reset(device);
+		if (acked) {
+			/* Make sure VBIF/GBIF is cleared before resetting */
+			ret = adreno_vbif_clear_pending_transactions(device);
+
+			if (ret == 0)
+				ret = adreno_soft_reset(device);
+		}
+
 		if (ret)
 			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
 	}
@@ -3692,6 +3657,9 @@
 				A6XX_VBIF_XIN_HALT_CTRL0),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 				A6XX_VBIF_XIN_HALT_CTRL1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+				A6XX_RBBM_VBIF_GX_RESET_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
 	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 3a5b489..daac9f1 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -611,24 +611,6 @@
 	if (result)
 		return result;
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
-		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
-		gmu->lm_config.lm_type = 1;
-		gmu->lm_config.lm_sensor_type = 1;
-		gmu->lm_config.throttle_config = 1;
-		gmu->lm_config.idle_throttle_en = 0;
-		gmu->lm_config.acd_en = 0;
-		gmu->bcl_config = 0;
-		gmu->lm_dcvs_level = 0;
-
-		result = hfi_send_lmconfig(gmu);
-		if (result) {
-			dev_err(dev, "Failure enabling LM (%d)\n",
-					result);
-			return result;
-		}
-	}
-
 	/* Tell the GMU we are sending no more HFIs until the next boot */
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		result = hfi_send_test(gmu);
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 105599c..b24509d 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -360,4 +360,5 @@
 		uint32_t bw_idx, enum rpm_ack_type ack_type);
 int hfi_notify_slumber(struct gmu_device *gmu, uint32_t init_perf_idx,
 		uint32_t init_bw_idx);
+int hfi_send_lmconfig(struct gmu_device *gmu);
 #endif  /* __KGSL_HFI_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ab5ca25..cda7a5b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2124,6 +2124,45 @@
 		devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk);
 }
 
+static bool _gpu_freq_supported(struct kgsl_pwrctrl *pwr, unsigned int freq)
+{
+	int i;
+
+	for (i = pwr->num_pwrlevels - 2; i >= 0; i--) {
+		if (pwr->pwrlevels[i].gpu_freq == freq)
+			return true;
+	}
+
+	return false;
+}
+
+static void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device)
+{
+	struct device *dev = &device->pdev->dev;
+	struct dev_pm_opp *opp;
+	unsigned long freq = 0;
+	int ret;
+
+	ret = dev_pm_opp_get_opp_count(dev);
+	/* Return early, If no OPP table or OPP count is zero */
+	if (ret <= 0)
+		return;
+
+	while (1) {
+		rcu_read_lock();
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+		rcu_read_unlock();
+
+		if (IS_ERR(opp))
+			break;
+
+		if (!_gpu_freq_supported(&device->pwrctrl, freq))
+			dev_pm_opp_disable(dev, freq);
+
+		freq++;
+	}
+}
+
 int kgsl_pwrctrl_init(struct kgsl_device *device)
 {
 	int i, k, m, n = 0, result;
@@ -2181,6 +2220,8 @@
 			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
+	kgsl_pwrctrl_disable_unused_opp(device);
+
 	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
 	clk_set_rate(pwr->grp_clks[6],
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 86438a9..6324728 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -47,6 +47,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 #include <linux/of_platform.h>
 #include <linux/msm-bus.h>
@@ -4446,34 +4447,60 @@
 	return ret;
 }
 
+#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
+#define SCM_CONFIG_ERRATA1 0x3
 static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct device *dev = smmu_domain->dev;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base;
+	int ret;
 	ktime_t cur;
 	u32 val;
+	struct scm_desc desc = {
+		.args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
+		.args[1] = false,
+		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
+	};
 
 	base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
 	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
+	if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+				      !(val & TLBSTATUS_SACTIVE), 0, 100))
+		return;
+
+	ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
+					    SCM_CONFIG_ERRATA1),
+			       &desc);
+	if (ret) {
+		dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
+		BUG();
+		return;
+	}
+
+	cur = ktime_get();
+	trace_tlbi_throttle_start(dev, 0);
+	msm_bus_noc_throttle_wa(true);
+
 	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
-				      !(val & TLBSTATUS_SACTIVE), 0, 100)) {
-		cur = ktime_get();
-		trace_tlbi_throttle_start(dev, 0);
+			      !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
+		dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state");
+		trace_tlbsync_timeout(dev, 0);
+		BUG();
+	}
 
-		msm_bus_noc_throttle_wa(true);
-		if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
-				      !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
-			dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout");
-			trace_tlbsync_timeout(dev, 0);
-		}
+	msm_bus_noc_throttle_wa(false);
+	trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
 
-		msm_bus_noc_throttle_wa(false);
-
-		trace_tlbi_throttle_end(
-				dev, ktime_us_delta(ktime_get(), cur));
+	desc.args[1] = true;
+	ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
+					    SCM_CONFIG_ERRATA1),
+			       &desc);
+	if (ret) {
+		dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
+		BUG();
 	}
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index 4418fb1..3c572f0 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -230,7 +230,7 @@
 			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
 			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
 			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
-			.value = 0,
+			.value = 0x2,
 		},
 		.danger_lut = {
 			.enable = false,
@@ -258,7 +258,7 @@
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
-			.value = 0x44443333,
+			.value = 0x66666543,
 		},
 		.priority_lut_high = {
 			.enable = true,
@@ -288,7 +288,7 @@
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
-			.value = 0x3,
+			.value = 0x1,
 		},
 		.ubwc_ctl = {
 			.enable = true,
@@ -306,7 +306,7 @@
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
-			.value = 0x44443333,
+			.value = 0x66666543,
 		},
 		.priority_lut_high = {
 			.enable = true,
@@ -336,7 +336,7 @@
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
-			.value = 0x3,
+			.value = 0x1,
 		},
 		.ubwc_ctl = {
 			.enable = true,
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index ce7a8b3..b9b59a1 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -123,8 +123,9 @@
  * power collapse for IPE and BPS hardware.
  *
  * @enable: flag to enable/disable
+ * @core_info: Core information to firmware
  */
-int hfi_enable_ipe_bps_pc(bool enable);
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info);
 
 /**
  * hfi_cmd_ubwc_config() - UBWC configuration to firmware
@@ -132,4 +133,15 @@
  */
 int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
 
+/**
+ * cam_hfi_resume() - function to resume
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
 #endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index eb4b132..6909972 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -51,6 +51,15 @@
 #define ICP_CSR_DBGSWENABLE                     (1 << 22)
 #define ICP_CSR_A5_STATUS_WFI                   (1 << 7)
 
+#define ICP_FLAG_A5_CTRL_DBG_EN                 (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EDBGRQ|\
+						ICP_CSR_DBGSWENABLE)
+
+#define ICP_FLAG_A5_CTRL_EN                     (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EN_CLKGATE_WFI)
+
 /* start of Queue table and queues */
 #define MAX_ICP_HFI_QUEUES                      4
 #define ICP_QHDR_TX_TYPE_MASK                   0xFF000000
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
index aaa18bb..84cc129 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
@@ -254,9 +254,11 @@
  * struct hfi_ipe_bps_pc
  * payload structure to configure HFI_PROPERTY_SYS_IPEBPS_PC
  * @enable: Flag to enable IPE, BPS interfrane power collapse
+ * @core_info: Core information to firmware
  */
 struct hfi_ipe_bps_pc {
 	uint32_t enable;
+	uint32_t core_info;
 } __packed;
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index a8855ae..eca16d6 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -40,6 +40,8 @@
 #define HFI_VERSION_INFO_STEP_BMSK   0xFF
 #define HFI_VERSION_INFO_STEP_SHFT  0
 
+#define HFI_MAX_POLL_TRY 5
+
 static struct hfi_info *g_hfi;
 unsigned int g_icp_mmu_hdl;
 static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -248,7 +250,7 @@
 	return 0;
 }
 
-int hfi_enable_ipe_bps_pc(bool enable)
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info)
 {
 	uint8_t *prop;
 	struct hfi_cmd_prop *dbg_prop;
@@ -267,6 +269,7 @@
 	dbg_prop->num_prop = 1;
 	dbg_prop->prop_data[0] = HFI_PROP_SYS_IPEBPS_PC;
 	dbg_prop->prop_data[1] = enable;
+	dbg_prop->prop_data[2] = core_info;
 
 	hfi_write_cmd(prop);
 	kfree(prop);
@@ -420,14 +423,28 @@
 {
 	uint32_t data;
 	uint32_t val;
+	uint32_t try = 0;
 
-	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
-	/* Add waiting logic in case it is not idle */
-	if (data & ICP_CSR_A5_STATUS_WFI) {
-		val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
-		val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
-		cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	while (try < HFI_MAX_POLL_TRY) {
+		data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
+
+		if (data & ICP_CSR_A5_STATUS_WFI)
+			break;
+		/* Need to poll here to confirm that FW is going to trigger
+		 * wfi and the Host can then proceed. No interrupt is
+		 * expected from FW at this time.
+		 */
+		msleep(100);
+		try++;
 	}
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+	cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+	cam_io_w(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
 }
 
 void cam_hfi_enable_cpu(void __iomem *icp_base)
@@ -437,6 +454,64 @@
 	cam_io_w((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
 }
 
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	uint32_t data;
+	uint32_t fw_version, status = 0;
+
+	cam_hfi_enable_cpu(icp_base);
+	g_hfi->csr_base = icp_base;
+
+	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+		status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
+
+	if (rc) {
+		CAM_ERR(CAM_HFI, "timed out , status = %u", status);
+		return -EINVAL;
+	}
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
+
+	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	if (debug) {
+		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
+			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
+
+		/* Barrier needed as the next write should be done only
+		 * after the previous write completes successfully. The
+		 * next write enables clock gating.
+		 */
+		wmb();
+
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	} else {
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
+
+	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+
+	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+	return rc;
+}
+
 int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
 		void __iomem *icp_base, bool debug)
 {
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 635d0df..aeec16c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -422,6 +422,11 @@
 	case CAM_ICP_A5_SEND_INIT:
 		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
 		break;
+
+	case CAM_ICP_A5_CMD_PC_PREP:
+		hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
+		break;
+
 	case CAM_ICP_A5_CMD_VOTE_CPAS: {
 		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 3473d08..c18a5e4 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -157,8 +157,11 @@
 			CAM_CPAS_REG_CPASTOP,
 			hw_info->pwr_ctrl, true, 0x1);
 
-		if ((pwr_status >> BPS_PWR_ON_MASK))
+		if ((pwr_status >> BPS_PWR_ON_MASK)) {
+			CAM_ERR(CAM_ICP, "BPS: pwr_status(%x):pwr_ctrl(%x)",
+				pwr_status, pwr_ctrl);
 			return -EINVAL;
+		}
 	}
 	cam_bps_get_gdsc_control(soc_info);
 	cam_cpas_reg_read(core_info->cpas_handle,
@@ -189,7 +192,7 @@
 	cam_cpas_reg_read(core_info->cpas_handle,
 		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
 	if (pwr_ctrl & BPS_COLLAPSE_MASK) {
-		CAM_ERR(CAM_ICP, "BPS: resume failed : %d", pwr_ctrl);
+		CAM_ERR(CAM_ICP, "BPS: pwr_ctrl(%x)", pwr_ctrl);
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 6f997a2..29a1b9a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -729,9 +729,6 @@
 	struct cam_hw_intf *bps_dev_intf = NULL;
 	int rc = 0;
 
-	if (!icp_hw_mgr.icp_pc_flag)
-		return rc;
-
 	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
 	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
 	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
@@ -741,36 +738,61 @@
 		return -EINVAL;
 	}
 
-	bps_dev_intf->hw_ops.process_cmd(
-		bps_dev_intf->hw_priv,
-		CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		if (hw_mgr->bps_ctxt_cnt++)
+			goto end;
+		bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
+		if (icp_hw_mgr.icp_pc_flag) {
+			bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
+			hw_mgr->core_info = hw_mgr->core_info | ICP_PWR_CLP_BPS;
+		}
+	} else {
+		if (hw_mgr->ipe_ctxt_cnt++)
+			goto end;
 
-	ipe0_dev_intf->hw_ops.process_cmd(
-		ipe0_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+		ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
+		if (icp_hw_mgr.icp_pc_flag) {
+			ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+		}
 
-	if (ipe1_dev_intf) {
-		ipe1_dev_intf->hw_ops.process_cmd(
-			ipe1_dev_intf->hw_priv,
-			CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+		if ((icp_hw_mgr.ipe1_enable) && (ipe1_dev_intf)) {
+			ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+
+			if (icp_hw_mgr.icp_pc_flag) {
+				ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_RESUME,
+					NULL, 0);
+			}
+		}
+		if (icp_hw_mgr.icp_pc_flag) {
+			hw_mgr->core_info = hw_mgr->core_info |
+				(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1);
+		}
 	}
 
-	rc = hfi_enable_ipe_bps_pc(true);
-
+	CAM_DBG(CAM_ICP, "core_info %X",  hw_mgr->core_info);
+	if (icp_hw_mgr.icp_pc_flag)
+		rc = hfi_enable_ipe_bps_pc(true, hw_mgr->core_info);
+	else
+		rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
+end:
 	return rc;
 }
 
 static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data, int dev_type)
 {
-	int rc = 0;
+	int rc = 0, dev;
 	struct cam_hw_intf *ipe0_dev_intf = NULL;
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 
-	if (!icp_hw_mgr.icp_pc_flag)
-		return rc;
-
 	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
 	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
 	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
@@ -780,20 +802,64 @@
 		return -EINVAL;
 	}
 
-	rc = bps_dev_intf->hw_ops.process_cmd(
-		bps_dev_intf->hw_priv,
-		CAM_ICP_BPS_CMD_POWER_COLLAPSE, NULL, 0);
+	if (!ctx_data)
+		dev = dev_type;
+	else
+		dev = ctx_data->icp_dev_acquire_info->dev_type;
 
-	rc = ipe0_dev_intf->hw_ops.process_cmd(
-		ipe0_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+	if (dev == CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "bps ctx cnt %d", hw_mgr->bps_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->bps_ctxt_cnt;
 
-	if (ipe1_dev_intf) {
-		rc = ipe1_dev_intf->hw_ops.process_cmd(
-			ipe1_dev_intf->hw_priv,
-			CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+		if (hw_mgr->bps_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.icp_pc_flag) {
+			rc = bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+				NULL, 0);
+			hw_mgr->core_info =
+				hw_mgr->core_info & (~ICP_PWR_CLP_BPS);
+		}
+
+		bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	} else {
+		CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->ipe_ctxt_cnt;
+
+		if (hw_mgr->ipe_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.icp_pc_flag) {
+			rc = ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+
+		}
+		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+
+		if (ipe1_dev_intf) {
+			if (icp_hw_mgr.icp_pc_flag) {
+				rc = ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+					NULL, 0);
+			}
+
+			ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+		}
+		if (icp_hw_mgr.icp_pc_flag) {
+			hw_mgr->core_info = hw_mgr->core_info &
+				(~(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1));
+		}
 	}
 
+	CAM_DBG(CAM_ICP, "Exit: core_info = %x", hw_mgr->core_info);
+end:
 	return rc;
 }
 
@@ -844,6 +910,7 @@
 		rc = -ENOMEM;
 		goto err;
 	}
+	icp_hw_mgr.icp_pc_flag = 1;
 
 	if (!debugfs_create_file("icp_debug_clk",
 		0644,
@@ -1229,13 +1296,19 @@
 {
 	int rc = 0;
 	int size_processed = 0;
-	struct hfi_msg_ipebps_async_ack *async_ack = NULL;
 
 	switch (msg_ptr[ICP_PACKET_TYPE]) {
 	case HFI_MSG_SYS_INIT_DONE:
 		CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
 		complete(&hw_mgr->a5_complete);
-		size_processed = sizeof(struct hfi_msg_init_done);
+		size_processed = (
+			(struct hfi_msg_init_done *)msg_ptr)->size;
+		break;
+
+	case HFI_MSG_SYS_PC_PREP_DONE:
+		CAM_DBG(CAM_ICP, "HFI_MSG_SYS_PC_PREP_DONE is received\n");
+		complete(&hw_mgr->a5_complete);
+		size_processed = sizeof(struct hfi_msg_pc_prep_done);
 		break;
 
 	case HFI_MSG_SYS_PING_ACK:
@@ -1253,20 +1326,21 @@
 	case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
 		CAM_DBG(CAM_ICP, "received ASYNC_INDIRECT_ACK");
 		rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
-		async_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
-		size_processed = async_ack->size;
-		async_ack = NULL;
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
 		break;
 
 	case  HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
 		CAM_DBG(CAM_ICP, "received ASYNC_DIRECT_ACK");
 		rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
-		size_processed = sizeof(struct hfi_msg_ipebps_async_ack);
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
 		break;
 
 	case HFI_MSG_EVENT_NOTIFY:
 		CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
-		size_processed = sizeof(struct hfi_msg_event_notify);
+		size_processed = (
+			(struct hfi_msg_event_notify *)msg_ptr)->size;
 		break;
 
 	default:
@@ -1357,13 +1431,13 @@
 {
 	int rc;
 	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc)
+		CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
-	rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
-	if (rc)
-		CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
 }
 
 static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
@@ -1534,6 +1608,159 @@
 	ctx_data->state = CAM_ICP_CTX_STATE_FREE;
 }
 
+static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&hw_mgr->a5_complete);
+	CAM_DBG(CAM_ICP, "Sending HFI init command");
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv, CAM_ICP_A5_CMD_PC_PREP, NULL, 0);
+	if (rc)
+		return rc;
+
+	CAM_DBG(CAM_ICP, "Wait for PC_PREP_DONE Message\n");
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+		msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "PC_PREP response timed out %d\n", rc);
+	}
+	CAM_DBG(CAM_ICP, "Done Waiting for PC_PREP Message\n");
+
+	return rc;
+}
+
+static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return 0;
+	}
+
+	if (ipe1_dev_intf) {
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+	}
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	return 0;
+}
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+
+	CAM_DBG(CAM_ICP, "ENTER");
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+
+	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	CAM_DBG(CAM_ICP, "EXIT");
+
+	return rc;
+}
+
+static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+	 CAM_DBG(CAM_ICP, "kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.qtbl.kva, hfi_mem.qtbl.iova, hfi_mem.qtbl.len);
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+	CAM_DBG(CAM_ICP, "kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.cmd_q.kva, hfi_mem.cmd_q.iova, hfi_mem.cmd_q.len);
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+	CAM_DBG(CAM_ICP, "kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.msg_q.kva, hfi_mem.msg_q.iova, hfi_mem.msg_q.len);
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+	CAM_DBG(CAM_ICP, "kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.dbg_q.kva, hfi_mem.dbg_q.iova, hfi_mem.dbg_q.len);
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+	return cam_hfi_resume(&hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		hw_mgr->a5_jtag_debug);
+}
+
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+
+	CAM_DBG(CAM_ICP, "Enter");
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		return -EINVAL;
+
+	rc = cam_icp_mgr_hfi_resume(hw_mgr);
+	if (rc)
+		goto hfi_resume_failed;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+hfi_resume_failed:
+	cam_icp_mgr_icp_power_collapse(hw_mgr);
+	return rc;
+}
 static int cam_icp_mgr_abort_handle(
 	struct cam_icp_hw_ctx_data *ctx_data)
 {
@@ -1673,13 +1900,12 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	if (hw_mgr->ctx_data[ctx_id].state !=
 		CAM_ICP_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		CAM_WARN(CAM_ICP,
+		CAM_DBG(CAM_ICP,
 			"ctx with id: %d not in right state to release: %d",
 			ctx_id, hw_mgr->ctx_data[ctx_id].state);
 		return 0;
@@ -1704,7 +1930,6 @@
 	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
 	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
 }
@@ -1739,10 +1964,11 @@
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	struct cam_icp_a5_set_irq_cb irq_cb;
 	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
-	int i, rc = 0;
+	int rc = 0;
 
+	CAM_DBG(CAM_ICP, "E");
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	if ((hw_mgr->fw_download ==  false) && (!hw_mgr->ctxt_cnt)) {
+	if (hw_mgr->fw_download == false) {
 		CAM_DBG(CAM_ICP, "hw mgr is already closed");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return 0;
@@ -1750,7 +1976,7 @@
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	if (!a5_dev_intf) {
-		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+		CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
 	}
@@ -1765,14 +1991,8 @@
 		sizeof(fw_buf_info));
 	if (rc)
 		CAM_ERR(CAM_ICP, "nullify the fw buf failed");
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
-		cam_icp_mgr_release_ctx(hw_mgr, i);
-
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	cam_hfi_deinit();
-	cam_icp_mgr_device_deinit(hw_mgr);
 
 	irq_cb.icp_hw_mgr_cb = NULL;
 	irq_cb.data = NULL;
@@ -2020,21 +2240,18 @@
 		goto fw_init_failed;
 	}
 
-	rc = a5_dev_intf->hw_ops.process_cmd(
-		a5_dev_intf->hw_priv,
-		CAM_ICP_A5_CMD_POWER_COLLAPSE,
-		NULL, 0);
-	hw_mgr->fw_download = true;
 	hw_mgr->ctxt_cnt = 0;
-	CAM_DBG(CAM_ICP, "FW download done successfully");
 
 	if (icp_hw_mgr.a5_debug_q)
 		hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
 
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
-	if (!download_fw_args)
-		cam_icp_mgr_hw_close(hw_mgr, NULL);
 
+	rc = cam_ipe_bps_deint(hw_mgr);
+	rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
+
+	hw_mgr->fw_download = true;
+	CAM_DBG(CAM_ICP, "FW download done successfully");
 	return rc;
 
 fw_init_failed:
@@ -2504,7 +2721,7 @@
 
 	ctx_data = release_hw->ctxt_to_hw_map;
 	if (!ctx_data) {
-		CAM_ERR(CAM_ICP, "NULL ctx");
+		CAM_ERR(CAM_ICP, "NULL ctx data");
 		return -EINVAL;
 	}
 
@@ -2527,14 +2744,15 @@
 		cam_icp_mgr_send_abort_status(ctx_data);
 	}
 
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
 	if (!hw_mgr->ctxt_cnt) {
-		cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
-			NULL, 0);
-		cam_icp_mgr_hw_close(hw_mgr, NULL);
+		CAM_DBG(CAM_ICP, "Last Release");
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
 		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
 		hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return rc;
 }
@@ -2676,8 +2894,10 @@
 
 	if (copy_from_user(&icp_dev_acquire_info,
 		(void __user *)args->acquire_info,
-		sizeof(struct cam_icp_acquire_dev_info)))
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		CAM_ERR(CAM_ICP, "Failed in acquire");
 		return -EFAULT;
+	}
 
 	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
 		CAM_ERR(CAM_ICP, "Invalid mode:%d",
@@ -2710,7 +2930,7 @@
 	}
 
 	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
-		(icp_dev_acquire_info.num_out_res *
+		((icp_dev_acquire_info.num_out_res - 1) *
 		sizeof(struct cam_icp_res_info));
 	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
 	if (!ctx_data->icp_dev_acquire_info) {
@@ -2721,6 +2941,7 @@
 
 	if (copy_from_user(ctx_data->icp_dev_acquire_info,
 		(void __user *)args->acquire_info, acquire_size)) {
+		CAM_ERR(CAM_ICP, "Failed in acquire: size = %d", acquire_size);
 		if (!hw_mgr->ctxt_cnt)
 			hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 		kfree(ctx_data->icp_dev_acquire_info);
@@ -2804,21 +3025,28 @@
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (!hw_mgr->ctxt_cnt) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
-		if (rc)
+		if (rc) {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
 			goto get_io_buf_failed;
-		rc = cam_icp_mgr_hw_open(hw_mgr, ctx_data);
-		if (rc)
+		}
+
+		rc = cam_icp_mgr_icp_resume(hw_mgr);
+		if (rc) {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
 			goto get_io_buf_failed;
-		rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
-		if (rc)
-			goto ipe_bps_resume_failed;
+		}
 
 		rc = cam_icp_send_ubwc_cfg(hw_mgr);
-		if (rc)
+		if (rc) {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
 			goto ubwc_cfg_failed;
-		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		}
+	}
+	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto ipe_bps_resume_failed;
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
@@ -2827,6 +3055,7 @@
 		CAM_ERR(CAM_ICP, "ping ack not received");
 		goto send_ping_failed;
 	}
+	CAM_DBG(CAM_ICP, "ping ack received");
 
 	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
 		ctx_data);
@@ -2866,6 +3095,7 @@
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	hw_mgr->ctxt_cnt++;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	CAM_DBG(CAM_ICP, "Acquire Done");
 
 	return 0;
 
@@ -2876,11 +3106,11 @@
 	cam_icp_mgr_destroy_handle(ctx_data);
 create_handle_failed:
 send_ping_failed:
-ubwc_cfg_failed:
 	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
 ipe_bps_resume_failed:
+ubwc_cfg_failed:
 	if (!hw_mgr->ctxt_cnt)
-		cam_icp_mgr_hw_close(hw_mgr, NULL);
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
 get_io_buf_failed:
 	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
 	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index ab19f45..e8919e8 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -252,6 +252,7 @@
  * @ipe0_enable: Flag for IPE0
  * @ipe1_enable: Flag for IPE1
  * @bps_enable: Flag for BPS
+ * @core_info: 32-bit value indicating the IPE0/1 and BPS power state
  */
 struct cam_icp_hw_mgr {
 	struct mutex hw_mgr_mutex;
@@ -288,9 +289,11 @@
 	bool ipe0_enable;
 	bool ipe1_enable;
 	bool bps_enable;
+	uint32_t core_info;
 };
 
 static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
 static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
-
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr);
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr);
 #endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
index dad7736..fd0482c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -32,6 +32,7 @@
 	CAM_ICP_A5_CMD_CPAS_START,
 	CAM_ICP_A5_CMD_CPAS_STOP,
 	CAM_ICP_A5_CMD_UBWC_CFG,
+	CAM_ICP_A5_CMD_PC_PREP,
 	CAM_ICP_A5_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 9bdde9c..ccab3a0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1986,6 +1986,7 @@
 	struct cam_hw_release_args       *release_args = release_hw_args;
 	struct cam_ife_hw_mgr            *hw_mgr       = hw_mgr_priv;
 	struct cam_ife_hw_mgr_ctx        *ctx;
+	uint32_t                          i;
 
 	if (!hw_mgr_priv || !release_hw_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -2015,6 +2016,14 @@
 	list_del_init(&ctx->list);
 	ctx->ctx_in_use = 0;
 	ctx->is_rdi_only_context = 0;
+	ctx->cdm_handle = 0;
+	ctx->cdm_ops = NULL;
+	atomic_set(&ctx->overflow_pending, 0);
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		ctx->sof_cnt[i] = 0;
+		ctx->eof_cnt[i] = 0;
+		ctx->epoch_cnt[i] = 0;
+	}
 	CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
 		ctx->ctx_index);
 	cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 4bee732..031b7b2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -316,14 +316,15 @@
 		cam_io_w_mb(irq_mask, controller->mem_base +
 			controller->irq_register_arr[i].mask_reg_offset);
 	}
-	if (need_lock)
-		spin_unlock_irqrestore(&controller->lock, flags);
 
 	list_add_tail(&evt_handler->list_node,
 		&controller->evt_handler_list_head);
 	list_add_tail(&evt_handler->th_list_node,
 		&controller->th_list_head[priority]);
 
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
 	return evt_handler->index;
 
 free_evt_handler:
@@ -348,6 +349,10 @@
 	if (!controller)
 		return rc;
 
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
 	list_for_each_entry_safe(evt_handler, evt_handler_temp,
 		&controller->evt_handler_list_head, list_node) {
 		if (evt_handler->index == handle) {
@@ -358,12 +363,12 @@
 		}
 	}
 
-	if (!found)
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
 		return rc;
+	}
 
-	need_lock = !in_irq();
-	if (need_lock)
-		spin_lock_irqsave(&controller->lock, flags);
 	for (i = 0; i < controller->num_registers; i++) {
 		controller->irq_register_arr[i].
 		top_half_enable_mask[evt_handler->priority] |=
@@ -398,6 +403,10 @@
 	if (!controller)
 		return rc;
 
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
 	list_for_each_entry_safe(evt_handler, evt_handler_temp,
 		&controller->evt_handler_list_head, list_node) {
 		if (evt_handler->index == handle) {
@@ -408,12 +417,12 @@
 		}
 	}
 
-	if (!found)
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
 		return rc;
+	}
 
-	need_lock = !in_irq();
-	if (need_lock)
-		spin_lock_irqsave(&controller->lock, flags);
 	for (i = 0; i < controller->num_registers; i++) {
 		controller->irq_register_arr[i].
 		top_half_enable_mask[evt_handler->priority] &=
@@ -459,6 +468,10 @@
 	int                         rc = -EINVAL;
 	bool                        need_lock;
 
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
 	list_for_each_entry_safe(evt_handler, evt_handler_temp,
 		&controller->evt_handler_list_head, list_node) {
 		if (evt_handler->index == handle) {
@@ -471,11 +484,7 @@
 		}
 	}
 
-	need_lock = !in_irq();
-
 	if (found) {
-		if (need_lock)
-			spin_lock_irqsave(&controller->lock, flags);
 		for (i = 0; i < controller->num_registers; i++) {
 			controller->irq_register_arr[i].
 				top_half_enable_mask[evt_handler->priority] &=
@@ -501,13 +510,14 @@
 					controller->mem_base +
 					controller->global_clear_offset);
 		}
-		if (need_lock)
-			spin_unlock_irqrestore(&controller->lock, flags);
 
 		kfree(evt_handler->evt_bit_mask_arr);
 		kfree(evt_handler);
 	}
 
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
 	return rc;
 }
 
@@ -630,8 +640,6 @@
 					i, j, need_th_processing[j]);
 		}
 	}
-	CAM_DBG(CAM_ISP, "unlocked controller %pK name %s lock %pK",
-		controller, controller->name, &controller->lock);
 
 	CAM_DBG(CAM_ISP, "Status Registers read Successful");
 
@@ -649,6 +657,8 @@
 		}
 	}
 	spin_unlock(&controller->lock);
+	CAM_DBG(CAM_ISP, "unlocked controller %pK name %s lock %pK",
+		controller, controller->name, &controller->lock);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 4a7eb00..70c9c3b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1497,9 +1497,9 @@
 	struct cam_isp_resource_node    *res)
 {
 	int rc = 0;
+	uint32_t val = 0;
 	struct cam_ife_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
-	uint32_t val = 0;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -1596,10 +1596,10 @@
 	enum cam_ife_csid_halt_cmd       stop_cmd)
 {
 	int rc = 0;
+	uint32_t val = 0;
 	struct cam_ife_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info               *soc_info;
 	struct cam_ife_csid_path_cfg         *path_data;
-	uint32_t val = 0;
 
 	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
 	csid_reg = csid_hw->csid_info->csid_reg;
@@ -1656,21 +1656,6 @@
 
 	/* For slave mode, halt command should take it from master */
 
-	/* Enable the EOF interrupt for resume at boundary case */
-	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
-		init_completion(&csid_hw->csid_ipp_complete);
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
-		val |= CSID_PATH_INFO_INPUT_EOF;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
-	} else {
-		val &= ~(CSID_PATH_INFO_RST_DONE |
-			CSID_PATH_ERROR_FIFO_OVERFLOW);
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
-	}
-
 	return rc;
 }
 
@@ -1811,9 +1796,9 @@
 	struct cam_isp_resource_node    *res)
 {
 	int rc = 0;
+	uint32_t val = 0, id;
 	struct cam_ife_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
-	uint32_t val = 0, id;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -1889,9 +1874,9 @@
 	enum cam_ife_csid_halt_cmd                stop_cmd)
 {
 	int rc = 0;
+	uint32_t id;
 	struct cam_ife_csid_reg_offset       *csid_reg;
 	struct cam_hw_soc_info               *soc_info;
-	uint32_t  val = 0, id;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -1926,25 +1911,9 @@
 		return -EINVAL;
 	}
 
-
 	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
-	init_completion(&csid_hw->csid_rdin_complete[id]);
-
-	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
-		val |= CSID_PATH_INFO_INPUT_EOF;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
-	} else {
-		val &= ~(CSID_PATH_INFO_RST_DONE |
-				CSID_PATH_ERROR_FIFO_OVERFLOW);
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
-	}
-
 	/*Halt the RDI path */
 	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
 			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
@@ -2029,12 +1998,10 @@
 	struct cam_isp_resource_node    *res)
 {
 	int rc = 0;
+	uint32_t val = 0, id, status, path_status_reg;
 	struct cam_ife_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
 
-	struct completion  *complete;
-	uint32_t val = 0, id;
-
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 
@@ -2060,19 +2027,19 @@
 	}
 
 	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
-		complete = &csid_hw->csid_ipp_complete;
+		path_status_reg = csid_reg->ipp_reg->csid_ipp_status_addr;
 	else
-		complete =  &csid_hw->csid_rdin_complete[res->res_id];
+		path_status_reg = csid_reg->rdi_reg[res->res_id]->
+			csid_rdi_status_addr;
 
-	rc = wait_for_completion_timeout(complete,
-		msecs_to_jiffies(IFE_CSID_TIMEOUT));
-	if (rc <= 0) {
-		CAM_ERR(CAM_ISP, "CSID%d stop at frame boundary failid:%drc:%d",
-			 csid_hw->hw_intf->hw_idx,
-			res->res_id, rc);
-		if (rc == 0)
-			/* continue even have timeout */
-			rc = -ETIMEDOUT;
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		path_status_reg, status,
+		(status == 1),
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Time out: Res id:%d Path has not halted",
+			res->res_id);
+		rc = -ETIMEDOUT;
 	}
 
 	/* Disable the interrupt */
@@ -2813,9 +2780,6 @@
 			CAM_ERR(CAM_ISP, "CSID:%d IPP EOF received",
 				csid_hw->hw_intf->hw_idx);
 
-		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
-			complete(&csid_hw->csid_ipp_complete);
-
 		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
 			CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
 				csid_hw->hw_intf->hw_idx);
@@ -2841,9 +2805,6 @@
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
 			CAM_ERR(CAM_ISP, "CSID RDI:%d EOF received", i);
 
-		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
-			complete(&csid_hw->csid_rdin_complete[i]);
-
 		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
 			CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
 				csid_hw->hw_intf->hw_idx);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 6ad0934..9689698 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -768,7 +768,10 @@
 		}
 
 		mutex_lock(&tbl.bufq[i].q_lock);
-		ion_free(tbl.client, tbl.bufq[i].i_hdl);
+		if (tbl.bufq[i].i_hdl) {
+			ion_free(tbl.client, tbl.bufq[i].i_hdl);
+			tbl.bufq[i].i_hdl = NULL;
+		}
 		tbl.bufq[i].fd = -1;
 		tbl.bufq[i].flags = 0;
 		tbl.bufq[i].buf_handle = -1;
@@ -813,7 +816,17 @@
 		return -EINVAL;
 	}
 
-	CAM_DBG(CAM_CRM, "Flags = %X", tbl.bufq[idx].flags);
+	CAM_DBG(CAM_CRM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);
+
+	mutex_lock(&tbl.m_lock);
+	if ((!tbl.bufq[idx].active) &&
+		(tbl.bufq[idx].vaddr) == 0) {
+		CAM_WARN(CAM_CRM, "Buffer at idx=%d is already unmapped,",
+			idx);
+		mutex_unlock(&tbl.m_lock);
+		return 0;
+	}
+
 
 	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
 		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
@@ -856,8 +869,11 @@
 	tbl.bufq[idx].is_imported = false;
 	tbl.bufq[idx].len = 0;
 	tbl.bufq[idx].num_hdl = 0;
+	tbl.bufq[idx].active = false;
 	mutex_unlock(&tbl.bufq[idx].q_lock);
-	cam_mem_put_slot(idx);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
 
 	return rc;
 }
@@ -1043,6 +1059,10 @@
 	}
 
 	if (!tbl.bufq[idx].active) {
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_CRM, "buffer is released already");
+			return 0;
+		}
 		CAM_ERR(CAM_CRM, "Released buffer state should be active");
 		return -EINVAL;
 	}
@@ -1184,6 +1204,10 @@
 	}
 
 	if (!tbl.bufq[idx].active) {
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_CRM, "buffer is released already");
+			return 0;
+		}
 		CAM_ERR(CAM_CRM, "Released buffer state should be active");
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index d7a382f..784e90b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -25,23 +25,6 @@
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 
-
-void cam_req_mgr_handle_core_shutdown(void)
-{
-	struct cam_req_mgr_core_session *session;
-	struct cam_req_mgr_core_session *tsession;
-	struct cam_req_mgr_session_info ses_info;
-
-	if (!list_empty(&g_crm_core_dev->session_head)) {
-		list_for_each_entry_safe(session, tsession,
-			&g_crm_core_dev->session_head, entry) {
-			ses_info.session_hdl =
-				session->session_hdl;
-			cam_req_mgr_destroy_session(&ses_info);
-		}
-	}
-}
-
 static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
 {
 	int32_t                  i = 0;
@@ -202,11 +185,18 @@
 
 	if (tbl->inject_delay > 0 && (traverse_data->validate_only == false)) {
 		CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
-		apply_data[tbl->pd].req_id = -1;
 		tbl->inject_delay--;
 		/* This pd table is not ready to proceed with asked idx */
 		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
-		return -EAGAIN;
+		apply_data[tbl->pd].req_id = -1;
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		return rc;
 	}
 
 	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
@@ -241,8 +231,20 @@
 		}
 	} else {
 		/* This pd table is not ready to proceed with asked idx */
-		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
-		return -EAGAIN;
+		if (tbl->slot[curr_idx].state == CRM_REQ_STATE_APPLIED)
+			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+		else
+			SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+
+		apply_data[tbl->pd].req_id = -1;
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		return rc;
 	}
 	return 0;
 }
@@ -472,12 +474,14 @@
  *                  traversed through
  * @idx           : index within input request queue
  * @validate_only : Whether to validate only and/or update settings
+ * @result        : Holds the value that indicates which of the pd
+ *                  tables have a req that is ready to be applied
  *
  * @return   : 0 for success, negative for failure
  *
  */
 static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
-	int32_t idx, bool validate_only)
+	int32_t idx, bool validate_only, int *result)
 {
 	int                            rc;
 	struct cam_req_mgr_traverse    traverse_data;
@@ -508,15 +512,18 @@
 	CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
 		idx, traverse_data.result, link->pd_mask, rc);
 
-	if (!rc && traverse_data.result == link->pd_mask) {
+	if (!traverse_data.result)
+		return -EAGAIN;
+
+	if (!rc) {
 		CAM_DBG(CAM_CRM,
 			"APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
 			link->link_hdl, idx,
 			apply_data[2].req_id, apply_data[1].req_id,
 			apply_data[0].req_id);
-	} else
-		rc = -EAGAIN;
+	}
 
+	*result = traverse_data.result;
 	return rc;
 }
 
@@ -645,12 +652,16 @@
  * @link     : pointer to link whose input queue and req tbl are
  *             traversed through
  * @slot     : pointer to the current slot being processed
+ * @result   : Holds the value that indicates which of the pd
+ *             tables have a req that is ready to be applied
+ *
  * @return   : 0 for success, negative for failure
  *
  */
 static int __cam_req_mgr_process_sync_req(
 	struct cam_req_mgr_core_link *link,
-	struct cam_req_mgr_slot *slot)
+	struct cam_req_mgr_slot *slot,
+	int *result)
 {
 	struct cam_req_mgr_core_link *sync_link = NULL;
 	int64_t req_id = 0;
@@ -675,7 +686,7 @@
 		link->sof_counter++;
 	}
 
-	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true);
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true, result);
 	if (rc) {
 		CAM_DBG(CAM_CRM,
 			"Req: %lld [My link]not available link: %x, rc=%d",
@@ -687,7 +698,7 @@
 		sync_link->req.in_q, req_id);
 	if (sync_slot_idx != -1) {
 		rc = __cam_req_mgr_check_link_is_ready(
-			sync_link, sync_slot_idx, true);
+			sync_link, sync_slot_idx, true, result);
 		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, status=%d, rc=%d",
 			sync_slot_idx,
 			sync_link->req.in_q->slot[sync_slot_idx].status,
@@ -698,8 +709,8 @@
 	}
 
 	if ((sync_slot_idx != -1) &&
-	((sync_link->req.in_q->slot[sync_slot_idx].status ==
-	CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
+		((sync_link->req.in_q->slot[sync_slot_idx].status ==
+		CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
 		rc = __cam_req_mgr_validate_sof_cnt(link, sync_link);
 		if (rc) {
 			CAM_DBG(CAM_CRM,
@@ -707,7 +718,8 @@
 				req_id, sync_link->link_hdl);
 			goto failure;
 		}
-		__cam_req_mgr_check_link_is_ready(link, slot->idx, false);
+		__cam_req_mgr_check_link_is_ready(link, slot->idx, false,
+			result);
 	} else {
 		CAM_DBG(CAM_CRM,
 			"Req: %lld [Other link] not ready to apply on link: %x",
@@ -724,6 +736,41 @@
 }
 
 /**
+ * __cam_req_mgr_reset_pd_tables()
+ *
+ * @brief    : resets pd tables based on req getting applied on
+ *             from a particular pd table
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @slot     : Pointer to the current slot
+ * @result   : indicates request of which pd table was successfully
+ *             processed
+ *
+ */
+static void __cam_req_mgr_reset_pd_tables(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_slot *slot,
+	int result)
+{
+	int pd_set_bit = 0;
+	int curr_idx = slot->idx;
+	int no_tables = link->req.num_tbl;
+	int max_pd_delay = link->max_delay;
+	struct cam_req_mgr_req_tbl  *tbl = link->req.l_tbl;
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	while (no_tables) {
+		pd_set_bit = (result & (1 << max_pd_delay));
+		if (pd_set_bit)
+			tbl->slot[curr_idx].state = CRM_REQ_STATE_APPLIED;
+		max_pd_delay--;
+		no_tables--;
+		tbl = tbl->next;
+		__cam_req_mgr_dec_idx(&curr_idx, 1, in_q->num_slots);
+	}
+}
+
+/**
  * __cam_req_mgr_process_req()
  *
  * @brief    : processes read index in request queue and traverse through table
@@ -736,7 +783,7 @@
 static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 	uint32_t trigger)
 {
-	int                                  rc = 0, idx;
+	int                                  rc = 0, idx, result = 0;
 	struct cam_req_mgr_slot             *slot = NULL;
 	struct cam_req_mgr_req_queue        *in_q;
 	struct cam_req_mgr_core_session     *session;
@@ -782,10 +829,11 @@
 		}
 
 		if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)
-			rc = __cam_req_mgr_process_sync_req(link, slot);
+			rc = __cam_req_mgr_process_sync_req(link, slot,
+				&result);
 		else
 			rc = __cam_req_mgr_check_link_is_ready(link,
-				slot->idx, false);
+				slot->idx, false, &result);
 
 		if (rc < 0) {
 			/*
@@ -830,16 +878,25 @@
 		spin_unlock_bh(&link->link_state_spin_lock);
 
 		if (link->trigger_mask == link->subscribe_event) {
-			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+			if (result == link->pd_mask) {
+				slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+				CAM_DBG(CAM_CRM, "req %d is applied on link %d",
+					slot->req_id, link->link_hdl);
+				idx = in_q->rd_idx;
+				__cam_req_mgr_dec_idx(
+					&idx, link->max_delay + 1,
+					in_q->num_slots);
+				__cam_req_mgr_reset_req_slot(link, idx);
+			} else {
+				CAM_DBG(CAM_CRM,
+					"Req:%lld not applied on all devices",
+				slot->req_id);
+				__cam_req_mgr_reset_pd_tables(link, slot,
+					result);
+				slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+			}
+
 			link->trigger_mask = 0;
-			CAM_DBG(CAM_CRM, "req %d is applied on link %d",
-				slot->req_id,
-				link->link_hdl);
-			idx = in_q->rd_idx;
-			__cam_req_mgr_dec_idx(
-				&idx, link->max_delay + 1,
-				in_q->num_slots);
-			__cam_req_mgr_reset_req_slot(link, idx);
 		}
 	}
 	mutex_unlock(&session->lock);
@@ -2028,6 +2085,22 @@
 	return rc;
 }
 
+void cam_req_mgr_handle_core_shutdown(void)
+{
+	struct cam_req_mgr_core_session *session;
+	struct cam_req_mgr_core_session *tsession;
+	struct cam_req_mgr_session_info ses_info;
+
+	if (!list_empty(&g_crm_core_dev->session_head)) {
+		list_for_each_entry_safe(session, tsession,
+			&g_crm_core_dev->session_head, entry) {
+			ses_info.session_hdl =
+				session->session_hdl;
+			cam_req_mgr_destroy_session(&ses_info);
+		}
+	}
+}
+
 /* IOCTLs handling section */
 int cam_req_mgr_create_session(
 	struct cam_req_mgr_session_info *ses_info)
@@ -2314,21 +2387,23 @@
 
 	if (!sched_req) {
 		CAM_ERR(CAM_CRM, "csl_req is NULL");
-		rc = -EINVAL;
-		goto end;
+		return -EINVAL;
 	}
 
+	mutex_lock(&g_crm_core_dev->crm_lock);
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sched_req->link_hdl);
 	if (!link) {
 		CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
 	}
 
 	session = (struct cam_req_mgr_core_session *)link->parent;
 	if (!session) {
 		CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
 	}
 
 	CAM_DBG(CAM_CRM, "link %x req %lld, sync_mode %d",
@@ -2351,6 +2426,7 @@
 	CAM_DBG(CAM_CRM, "DONE dev %x req %lld sync_mode %d",
 		sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode);
 end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
 
@@ -2373,11 +2449,13 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&g_crm_core_dev->crm_lock);
 	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(sync_info->session_hdl);
 	if (!cam_session) {
 		CAM_ERR(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
 
@@ -2414,6 +2492,7 @@
 
 done:
 	mutex_unlock(&cam_session->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
 
@@ -2439,6 +2518,7 @@
 		goto end;
 	}
 
+	mutex_lock(&g_crm_core_dev->crm_lock);
 	/* session hdl's priv data is cam session struct */
 	session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(flush_info->session_hdl);
@@ -2482,6 +2562,7 @@
 		&link->workq_comp,
 		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
 end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 42f8c77..4511a5d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -79,12 +79,14 @@
  * EMPTY   : indicates req slot is empty
  * PENDING : indicates req slot is waiting for reqs from all devs
  * READY   : indicates req slot is ready to be sent to devs
+ * APPLIED : indicates req slot is already sent to devs
  * INVALID : indicates req slot is not in valid state
  */
 enum crm_req_state {
 	CRM_REQ_STATE_EMPTY,
 	CRM_REQ_STATE_PENDING,
 	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_APPLIED,
 	CRM_REQ_STATE_INVALID,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
index 2aa2ab1..2189202 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -82,7 +82,7 @@
 {
 	CAM_DBG(CAM_CRM, "destroy timer %pK", *crm_timer);
 	if (*crm_timer) {
-		del_timer(&(*crm_timer)->sys_timer);
+		del_timer_sync(&(*crm_timer)->sys_timer);
 		kfree(*crm_timer);
 		*crm_timer = NULL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index febf922..079f5bb 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -33,6 +33,7 @@
 	power_info->power_setting[0].seq_type = SENSOR_VAF;
 	power_info->power_setting[0].seq_val = CAM_VAF;
 	power_info->power_setting[0].config_val = 1;
+	power_info->power_setting[0].delay = 2;
 
 	power_info->power_down_setting_size = 1;
 	power_info->power_down_setting =
@@ -67,6 +68,18 @@
 		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
 	power_info = &soc_private->power_info;
 
+	if ((power_info->power_setting == NULL) &&
+		(power_info->power_down_setting == NULL)) {
+		CAM_INFO(CAM_ACTUATOR,
+			"Using default power settings");
+		rc = cam_actuator_construct_default_power_setting(power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Construct default actuator power setting failed.");
+			return rc;
+		}
+	}
+
 	/* Parse and fill vreg params for power up settings */
 	rc = msm_camera_fill_vreg_params(
 		&a_ctrl->soc_info,
@@ -93,16 +106,14 @@
 
 	rc = cam_sensor_core_power_up(power_info, soc_info);
 	if (rc) {
-		CAM_ERR(CAM_ACTUATOR, "failed in ois power up rc %d", rc);
+		CAM_ERR(CAM_ACTUATOR,
+			"failed in actuator power up rc %d", rc);
 		return rc;
 	}
 
-	/* VREG needs some delay to power up */
-	usleep_range(2000, 2050);
-
 	rc = camera_io_init(&a_ctrl->io_master_info);
 	if (rc < 0)
-		CAM_ERR(CAM_ACTUATOR, "cci_init failed: rc: %d", rc);
+		CAM_ERR(CAM_ACTUATOR, "cci init failed: rc: %d", rc);
 
 	return rc;
 }
@@ -115,7 +126,7 @@
 	struct cam_actuator_soc_private  *soc_private;
 
 	if (!a_ctrl) {
-		CAM_ERR(CAM_ACTUATOR, "failed: e_ctrl %pK", a_ctrl);
+		CAM_ERR(CAM_ACTUATOR, "failed: a_ctrl %pK", a_ctrl);
 		return -EINVAL;
 	}
 
@@ -369,23 +380,34 @@
 int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
 	void *arg)
 {
-	int32_t rc = 0;
+	int32_t  rc = 0;
+	int32_t  i = 0;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	size_t   len_of_buff = 0;
+	uint32_t *offset = NULL;
+	uint32_t *cmd_buf = NULL;
 	uint64_t generic_ptr;
-	struct cam_control *ioctl_ctrl = NULL;
-	struct cam_packet *csl_packet = NULL;
+	struct common_header      *cmm_hdr = NULL;
+	struct cam_control        *ioctl_ctrl = NULL;
+	struct cam_packet         *csl_packet = NULL;
 	struct cam_config_dev_cmd config;
-	struct i2c_data_settings *i2c_data = NULL;
+	struct i2c_data_settings  *i2c_data = NULL;
 	struct i2c_settings_array *i2c_reg_settings = NULL;
-	struct cam_cmd_buf_desc *cmd_desc = NULL;
-	size_t len_of_buff = 0;
-	uint32_t *offset = NULL, *cmd_buf;
-	struct cam_req_mgr_add_request add_req;
+	struct cam_cmd_buf_desc   *cmd_desc = NULL;
+	struct cam_req_mgr_add_request  add_req;
+	struct cam_actuator_soc_private *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t  *power_info = NULL;
 
 	if (!a_ctrl || !arg) {
 		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+
+	power_info = &soc_private->power_info;
+
 	ioctl_ctrl = (struct cam_control *)arg;
 	if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
 		sizeof(config)))
@@ -405,53 +427,99 @@
 		return -EINVAL;
 	}
 
-	csl_packet = (struct cam_packet *)(generic_ptr +
-		config.offset);
+	csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
 	CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
 
-	if ((csl_packet->header.op_code & 0xFFFFFF) ==
-			CAM_ACTUATOR_PACKET_OPCODE_INIT) {
-		i2c_data = &(a_ctrl->i2c_data);
-		i2c_reg_settings = &i2c_data->init_settings;
-
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_ACTUATOR_PACKET_OPCODE_INIT:
 		offset = (uint32_t *)&csl_packet->payload;
 		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 
-		if (csl_packet->num_cmd_buf != 2) {
-			CAM_ERR(CAM_ACTUATOR, "cmd Buffers in Init : %d",
-				csl_packet->num_cmd_buf);
-			return -EINVAL;
+		/* Loop through multiple command buffers */
+		for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+					(uint64_t *)&generic_ptr, &len_of_buff);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_ACTUATOR, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmm_hdr = (struct common_header *)cmd_buf;
+
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received slave info buffer");
+				rc = cam_actuator_slaveInfo_pkt_parser(
+					a_ctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed to parse slave info: %d", rc);
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received power settings buffer");
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					power_info);
+				if (rc) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse power settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			default:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received initSettings buffer");
+				i2c_data = &(a_ctrl->i2c_data);
+				i2c_reg_settings =
+					&i2c_data->init_settings;
+
+				i2c_reg_settings->request_id = 0;
+				i2c_reg_settings->is_settings_valid = 1;
+				rc = cam_sensor_i2c_command_parser(
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse init settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			}
 		}
 
-		rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buff);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
-			return rc;
-		}
-		cmd_buf = (uint32_t *)generic_ptr;
-		cmd_buf += cmd_desc->offset / sizeof(uint32_t);
-		rc = cam_actuator_slaveInfo_pkt_parser(a_ctrl, cmd_buf);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Failed in parsing the pkt");
-			return rc;
-		}
-		cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
-		i2c_data->init_settings.request_id = 0;
-		i2c_reg_settings->is_settings_valid = 1;
-		rc = cam_sensor_i2c_command_parser(i2c_reg_settings,
-			&cmd_desc[1], 1);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
-				rc);
-			return rc;
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
+			rc = cam_actuator_power_up(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					" Actuator Power up failed");
+				return rc;
+			}
+			a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
 		}
 
 		rc = cam_actuator_apply_settings(a_ctrl,
 			&a_ctrl->i2c_data.init_settings);
-		if (rc < 0)
+		if (rc < 0) {
 			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
+			return rc;
+		}
 
 		/* Delete the request even if the apply is failed */
 		rc = delete_request(&a_ctrl->i2c_data.init_settings);
@@ -460,10 +528,16 @@
 				"Fail in deleting the Init settings");
 			rc = 0;
 		}
-	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
-		CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS) {
-		a_ctrl->setting_apply_state =
-			ACT_APPLY_SETTINGS_NOW;
+		break;
+	case CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
+		a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_NOW;
 
 		i2c_data = &(a_ctrl->i2c_data);
 		i2c_reg_settings = &i2c_data->init_settings;
@@ -477,16 +551,22 @@
 		rc = cam_sensor_i2c_command_parser(i2c_reg_settings,
 			cmd_desc, 1);
 		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
-				rc);
+			CAM_ERR(CAM_ACTUATOR,
+				"Auto move lens parsing failed: %d", rc);
 			return rc;
 		}
-	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
-		CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS) {
+		break;
+	case CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
 		i2c_data = &(a_ctrl->i2c_data);
-		i2c_reg_settings =
-			&i2c_data->per_frame
-			[csl_packet->header.request_id % MAX_PER_FRAME_ARRAY];
+		i2c_reg_settings = &i2c_data->per_frame[
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY];
 
 		i2c_data->init_settings.request_id =
 			csl_packet->header.request_id;
@@ -497,10 +577,11 @@
 		rc = cam_sensor_i2c_command_parser(i2c_reg_settings,
 			cmd_desc, 1);
 		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
-				rc);
+			CAM_ERR(CAM_ACTUATOR,
+				"Manual move lens parsing failed: %d", rc);
 			return rc;
 		}
+		break;
 	}
 
 	if ((csl_packet->header.op_code & 0xFFFFFF) !=
@@ -526,12 +607,13 @@
 	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
 		return;
 
-	if ((a_ctrl->cam_act_state == CAM_ACTUATOR_START) ||
-		(a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE)) {
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_CONFIG) {
 		rc = cam_actuator_power_down(a_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+	}
 
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) {
 		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "destroying  dhdl failed");
@@ -595,28 +677,24 @@
 			goto release_mutex;
 		}
 
-		rc = cam_actuator_power_up(a_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
-			goto release_mutex;
-		}
-
 		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
 	}
 		break;
 	case CAM_RELEASE_DEV: {
-		if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_ACTUATOR,
-			"Not in right state to release : %d",
-			a_ctrl->cam_act_state);
+				"Cant release actuator: in start state");
 			goto release_mutex;
 		}
 
-		rc = cam_actuator_power_down(a_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
-			goto release_mutex;
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_CONFIG) {
+			rc = cam_actuator_power_down(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"Actuator Power down failed");
+				goto release_mutex;
+			}
 		}
 
 		if (a_ctrl->bridge_intf.device_hdl == -1) {
@@ -648,7 +726,7 @@
 	}
 		break;
 	case CAM_START_DEV: {
-		if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_CONFIG) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_ACTUATOR,
 			"Not in right state to start : %d",
@@ -681,7 +759,7 @@
 						i2c_set->request_id, rc);
 			}
 		}
-		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
+		a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
 	}
 		break;
 	case CAM_CONFIG_DEV: {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index c5c9b0a..96fdfeb 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -183,7 +183,7 @@
 	rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
 	if (rc < 0) {
 		CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
-		goto free_ctrl;
+		goto free_soc;
 	}
 
 	rc = cam_actuator_init_subdev(a_ctrl);
@@ -218,19 +218,10 @@
 
 	v4l2_set_subdevdata(&(a_ctrl->v4l2_dev_str.sd), a_ctrl);
 
-	rc = cam_actuator_construct_default_power_setting(
-		&soc_private->power_info);
-	if (rc < 0) {
-		CAM_ERR(CAM_ACTUATOR,
-			"Construct default actuator power setting failed.");
-		goto free_mem;
-	}
-
 	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 
 	return rc;
-free_mem:
-	kfree(a_ctrl->i2c_data.per_frame);
+
 unreg_subdev:
 	cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
 free_soc:
@@ -311,7 +302,7 @@
 	struct cam_actuator_ctrl_t      *a_ctrl = NULL;
 	struct cam_actuator_soc_private *soc_private = NULL;
 
-	/* Create sensor control structure */
+	/* Create actuator control structure */
 	a_ctrl = devm_kzalloc(&pdev->dev,
 		sizeof(struct cam_actuator_ctrl_t), GFP_KERNEL);
 	if (!a_ctrl)
@@ -379,18 +370,10 @@
 
 	platform_set_drvdata(pdev, a_ctrl);
 	v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
-
-	rc = cam_actuator_construct_default_power_setting(
-		&soc_private->power_info);
-	if (rc < 0) {
-		CAM_ERR(CAM_ACTUATOR,
-			"Construct default actuator power setting failed.");
-		goto unreg_subdev;
-	}
+	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 
 	return rc;
-unreg_subdev:
-	cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
+
 free_mem:
 	kfree(a_ctrl->i2c_data.per_frame);
 free_soc:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 8b8b1ef..c4333a0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -56,6 +56,7 @@
 enum cam_actuator_state {
 	CAM_ACTUATOR_INIT,
 	CAM_ACTUATOR_ACQUIRE,
+	CAM_ACTUATOR_CONFIG,
 	CAM_ACTUATOR_START,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 55b7c72..96dc284 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -46,8 +46,10 @@
 		CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
 			a_ctrl->cci_i2c_master, rc);
 		if ((rc < 0) || (a_ctrl->cci_i2c_master >= MASTER_MAX)) {
-			CAM_ERR(CAM_ACTUATOR, "Wrong info: dt CCI master:%d",
-				a_ctrl->cci_i2c_master);
+			CAM_ERR(CAM_ACTUATOR,
+				"Wrong info: rc: %d, dt CCI master:%d",
+				rc, a_ctrl->cci_i2c_master);
+			rc = -EFAULT;
 			return rc;
 		}
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 6cfb965..ed91250 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -146,16 +146,16 @@
 			base + CCI_RESET_CMD_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
-		CAM_ERR(CAM_CCI, "MASTER_0 error 0x%x", irq);
 		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
 		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
-		CAM_ERR(CAM_CCI, "MASTER_1 error 0x%x", irq);
 		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
 		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq);
 	}
 	return IRQ_HANDLED;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 2adca66..262e49c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -140,12 +140,6 @@
 	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
 	csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode;
 
-	if (csiphy_dev->csiphy_info.secure_mode &&
-		(csiphy_dev->config_count == 1))
-		rc = cam_csiphy_notify_secure_mode(
-			csiphy_dev->soc_info.index,
-			CAM_SECURE_MODE_SECURE);
-
 	return rc;
 }
 
@@ -365,6 +359,14 @@
 	if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
 		soc_info = &csiphy_dev->soc_info;
 
+		if (csiphy_dev->csiphy_info.secure_mode)
+			cam_csiphy_notify_secure_mode(
+				csiphy_dev->soc_info.index,
+				CAM_SECURE_MODE_NON_SECURE);
+
+		csiphy_dev->csiphy_info.secure_mode =
+			CAM_SECURE_MODE_NON_SECURE;
+
 		cam_csiphy_reset(csiphy_dev);
 		cam_soc_util_disable_platform_resource(soc_info, true, true);
 
@@ -393,6 +395,43 @@
 	csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
 }
 
+static int32_t cam_csiphy_external_cmd(struct csiphy_device *csiphy_dev,
+	struct cam_config_dev_cmd *p_submit_cmd)
+{
+	struct cam_csiphy_info cam_cmd_csiphy_info;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cam_cmd_csiphy_info,
+		(void __user *)p_submit_cmd->packet_handle,
+		sizeof(struct cam_csiphy_info))) {
+		CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n");
+		rc = -EFAULT;
+	} else {
+		csiphy_dev->csiphy_info.lane_cnt =
+			cam_cmd_csiphy_info.lane_cnt;
+		csiphy_dev->csiphy_info.lane_cnt =
+			cam_cmd_csiphy_info.lane_cnt;
+		csiphy_dev->csiphy_info.lane_mask =
+			cam_cmd_csiphy_info.lane_mask;
+		csiphy_dev->csiphy_info.csiphy_3phase =
+			cam_cmd_csiphy_info.csiphy_3phase;
+		csiphy_dev->csiphy_info.combo_mode =
+			cam_cmd_csiphy_info.combo_mode;
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info.settle_time;
+		csiphy_dev->csiphy_info.data_rate =
+			cam_cmd_csiphy_info.data_rate;
+		CAM_DBG(CAM_CSIPHY,
+			"%s CONFIG_DEV_EXT settle_time= %lld lane_cnt=%d lane_mask=0x%x",
+			__func__,
+			csiphy_dev->csiphy_info.settle_time,
+			csiphy_dev->csiphy_info.lane_cnt,
+			csiphy_dev->csiphy_info.lane_mask);
+	}
+
+	return rc;
+}
+
 int32_t cam_csiphy_core_cfg(void *phy_dev,
 			void *arg)
 {
@@ -516,6 +555,14 @@
 			goto release_mutex;
 		}
 
+		if (csiphy_dev->csiphy_info.secure_mode)
+			cam_csiphy_notify_secure_mode(
+				csiphy_dev->soc_info.index,
+				CAM_SECURE_MODE_NON_SECURE);
+
+		csiphy_dev->csiphy_info.secure_mode =
+			CAM_SECURE_MODE_NON_SECURE;
+
 		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0)
 			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
@@ -560,15 +607,6 @@
 		csiphy_dev->config_count--;
 		csiphy_dev->acquire_count--;
 
-		if (csiphy_dev->csiphy_info.secure_mode &&
-			(!csiphy_dev->config_count)) {
-			csiphy_dev->csiphy_info.secure_mode =
-				CAM_SECURE_MODE_NON_SECURE;
-			rc = cam_csiphy_notify_secure_mode(
-				csiphy_dev->soc_info.index,
-				CAM_SECURE_MODE_NON_SECURE);
-		}
-
 		if (csiphy_dev->acquire_count == 0)
 			csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
 	}
@@ -609,6 +647,15 @@
 			goto release_mutex;
 		}
 
+		if (csiphy_dev->csiphy_info.secure_mode) {
+			rc = cam_csiphy_notify_secure_mode(
+				csiphy_dev->soc_info.index,
+				CAM_SECURE_MODE_SECURE);
+			if (rc < 0)
+				csiphy_dev->csiphy_info.secure_mode =
+					CAM_SECURE_MODE_NON_SECURE;
+		}
+
 		rc = cam_csiphy_enable_hw(csiphy_dev);
 		if (rc != 0) {
 			CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
@@ -629,6 +676,19 @@
 		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
 	}
 		break;
+	case CAM_CONFIG_DEV_EXTERNAL: {
+		struct cam_config_dev_cmd submit_cmd;
+
+		if (copy_from_user(&submit_cmd,
+			(void __user *)cmd->handle,
+			sizeof(struct cam_config_dev_cmd))) {
+			CAM_ERR(CAM_CSIPHY, "failed copy config ext\n");
+			rc = -EFAULT;
+		} else {
+			rc = cam_csiphy_external_cmd(csiphy_dev, &submit_cmd);
+		}
+		break;
+	}
 	default:
 		CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 3abdd80..b0fbead 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -160,7 +160,7 @@
 		power_info->power_setting_size);
 	if (rc) {
 		CAM_ERR(CAM_EEPROM,
-			"failed to fill vreg params for power up rc:%d", rc);
+			"failed to fill power up vreg params rc:%d", rc);
 		return rc;
 	}
 
@@ -171,7 +171,7 @@
 		power_info->power_down_setting_size);
 	if (rc) {
 		CAM_ERR(CAM_EEPROM,
-			"failed to fill vreg params power down rc:%d", rc);
+			"failed to fill power down vreg params  rc:%d", rc);
 		return rc;
 	}
 
@@ -588,17 +588,18 @@
 					sizeof(struct cam_cmd_i2c_info);
 				processed_cmd_buf_in_bytes +=
 					cmd_length_in_bytes;
-				cmd_buf += cmd_length_in_bytes/4;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
 				break;
 			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
 			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
-				cmd_length_in_bytes =
-					sizeof(struct cam_cmd_power);
+				cmd_length_in_bytes = total_cmd_buf_in_bytes;
 				rc = cam_sensor_update_power_settings(cmd_buf,
 					cmd_length_in_bytes, power_info);
 				processed_cmd_buf_in_bytes +=
-					total_cmd_buf_in_bytes;
-				cmd_buf += total_cmd_buf_in_bytes/4;
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
 				if (rc) {
 					CAM_ERR(CAM_EEPROM, "Failed");
 					return rc;
@@ -614,7 +615,7 @@
 					&cmd_length_in_bytes, &num_map);
 				processed_cmd_buf_in_bytes +=
 					cmd_length_in_bytes;
-				cmd_buf += cmd_length_in_bytes/4;
+				cmd_buf += cmd_length_in_bytes/sizeof(uint32_t);
 				break;
 			default:
 				break;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index f3c4811..68c5eea 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -419,8 +419,10 @@
 	e_ctrl->io_master_info.master_type = CCI_MASTER;
 	e_ctrl->io_master_info.cci_client = kzalloc(
 		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
-	if (!e_ctrl->io_master_info.cci_client)
+	if (!e_ctrl->io_master_info.cci_client) {
+		rc = -ENOMEM;
 		goto free_e_ctrl;
+	}
 
 	soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
 		GFP_KERNEL);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
index c250045..5a6a401 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -295,17 +295,17 @@
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	uint32_t                        temp;
 
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_EEPROM, "Dev is NULL");
+		return -EINVAL;
+	}
+
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
 		CAM_ERR(CAM_EEPROM, "Failed to read DT properties rc : %d", rc);
 		return rc;
 	}
 
-	if (!soc_info->dev) {
-		CAM_ERR(CAM_EEPROM, "Dev is NULL");
-		return -EINVAL;
-	}
-
 	of_node = soc_info->dev->of_node;
 
 	rc = of_property_read_string(of_node, "eeprom-name",
@@ -318,8 +318,9 @@
 	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
 		rc = of_property_read_u32(of_node, "cci-master",
 			&e_ctrl->cci_i2c_master);
-		if (rc < 0) {
+		if (rc < 0 || (e_ctrl->cci_i2c_master >= MASTER_MAX)) {
 			CAM_DBG(CAM_EEPROM, "failed rc %d", rc);
+			rc = -EFAULT;
 			return rc;
 		}
 	}
@@ -349,7 +350,7 @@
 				"i2c-freq-mode read fail %d", rc);
 			soc_private->i2c_info.i2c_freq_mode = 0;
 		}
-		if (soc_private->i2c_info.i2c_freq_mode	>= I2C_MAX_MODES) {
+		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
 			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
 				soc_private->i2c_info.i2c_freq_mode);
 			soc_private->i2c_info.i2c_freq_mode = 0;
@@ -362,7 +363,7 @@
 		soc_info->clk[i] = devm_clk_get(soc_info->dev,
 			soc_info->clk_name[i]);
 		if (!soc_info->clk[i]) {
-			CAM_ERR(CAM_SENSOR, "get failed for %s",
+			CAM_ERR(CAM_EEPROM, "get failed for %s",
 				soc_info->clk_name[i]);
 			rc = -ENOENT;
 			return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index c977fc4..e0d4502 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -36,7 +36,6 @@
 			return rc;
 		}
 		flash_ctrl->is_regulator_enabled = true;
-		flash_ctrl->flash_state = CAM_FLASH_STATE_START;
 	} else if ((!regulator_enable) &&
 		(flash_ctrl->is_regulator_enabled == true)) {
 		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
@@ -47,7 +46,6 @@
 			return rc;
 		}
 		flash_ctrl->is_regulator_enabled = false;
-		flash_ctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
 	} else {
 		CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
 			flash_ctrl->flash_state);
@@ -68,12 +66,14 @@
 	nrt_settings = &fctrl->nrt_info;
 
 	if (nrt_settings->cmn_attr.cmd_type ==
-		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO) {
 		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
 	} else if ((nrt_settings->cmn_attr.cmd_type ==
 		CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
 		(nrt_settings->cmn_attr.cmd_type ==
-		CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+		CAMERA_SENSOR_FLASH_CMD_TYPE_RER) ||
+		(nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE)) {
 		fctrl->nrt_info.cmn_attr.is_settings_valid = false;
 		fctrl->nrt_info.cmn_attr.count = 0;
 		fctrl->nrt_info.num_iterations = 0;
@@ -312,6 +312,49 @@
 
 	if (req_id == 0) {
 		if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE) {
+			flash_data = &fctrl->nrt_info;
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+				if (fctrl->flash_state !=
+					CAM_FLASH_STATE_CONFIG) {
+					CAM_WARN(CAM_FLASH,
+					"Cannot apply Start Dev:Prev state: %d",
+					fctrl->flash_state);
+					return rc;
+				}
+				rc = cam_flash_prepare(fctrl, true);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Enable Regulator Failed rc = %d", rc);
+					return rc;
+				}
+				rc = cam_flash_high(fctrl, flash_data);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+						"FLASH ON failed : %d",
+						rc);
+			}
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"LED OFF FAILED: %d",
+					rc);
+					return rc;
+				}
+				if ((fctrl->flash_state ==
+					CAM_FLASH_STATE_START) &&
+					(fctrl->is_regulator_enabled == true)) {
+					rc = cam_flash_prepare(fctrl, false);
+					if (rc)
+						CAM_ERR(CAM_FLASH,
+						"Disable Regulator failed: %d",
+						rc);
+				}
+			}
+		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
 			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
 			flash_data = &fctrl->nrt_info;
 			if (flash_data->opcode ==
@@ -491,11 +534,39 @@
 		cam_flash_info = (struct cam_flash_init *)cmd_buf;
 
 		switch (cam_flash_info->cmd_type) {
-		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT:
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
 			fctrl->flash_type = cam_flash_info->flash_type;
 			fctrl->is_regulator_enabled = false;
 			fctrl->nrt_info.cmn_attr.cmd_type =
-				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT;
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO;
+			fctrl->flash_state =
+				CAM_FLASH_STATE_CONFIG;
+			break;
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE:
+			CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+				flash_operation_info =
+					(struct cam_flash_set_on_off *) cmd_buf;
+				fctrl->nrt_info.cmn_attr.count =
+					flash_operation_info->count;
+				fctrl->nrt_info.cmn_attr.request_id = 0;
+				fctrl->nrt_info.opcode =
+					flash_operation_info->opcode;
+				fctrl->nrt_info.cmn_attr.cmd_type =
+					CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
+				for (i = 0;
+					i < flash_operation_info->count; i++)
+					fctrl->nrt_info.led_current_ma[i] =
+					flash_operation_info->led_current_ma[i];
+
+				mutex_lock(&fctrl->flash_wq_mutex);
+				rc = cam_flash_apply_setting(fctrl, 0);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+						"Apply setting failed: %d",
+						rc);
+				mutex_unlock(&fctrl->flash_wq_mutex);
+				fctrl->flash_state =
+					CAM_FLASH_STATE_CONFIG;
 			break;
 		default:
 			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
@@ -541,28 +612,35 @@
 		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
 			CAM_DBG(CAM_FLASH,
 				"CAMERA_FLASH_CMD_TYPE_OPS case called");
-			if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+			if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
+				(fctrl->flash_state ==
+					CAM_FLASH_STATE_CONFIG)) {
+				flash_operation_info =
+					(struct cam_flash_set_on_off *) cmd_buf;
+				if (!flash_operation_info) {
+					CAM_ERR(CAM_FLASH,
+						"flash_operation_info Null");
+					return -EINVAL;
+				}
+
+				fctrl->per_frame[frame_offset].opcode =
+					flash_operation_info->opcode;
+				fctrl->per_frame[frame_offset].cmn_attr.count =
+					flash_operation_info->count;
+				for (i = 0;
+					i < flash_operation_info->count; i++)
+					fctrl->per_frame[frame_offset].
+						led_current_ma[i]
+						= flash_operation_info->
+						led_current_ma[i];
+
+			} else {
 				CAM_ERR(CAM_FLASH,
 					"Rxed Update packets without linking");
 				fctrl->per_frame[frame_offset].
 					cmn_attr.is_settings_valid = false;
 				return -EINVAL;
 			}
-			flash_operation_info =
-				(struct cam_flash_set_on_off *) cmd_buf;
-			if (!flash_operation_info) {
-				CAM_ERR(CAM_FLASH, "flash_operation_info Null");
-				return -EINVAL;
-			}
-
-			fctrl->per_frame[frame_offset].opcode =
-				flash_operation_info->opcode;
-			fctrl->per_frame[frame_offset].cmn_attr.count =
-				flash_operation_info->count;
-			for (i = 0; i < flash_operation_info->count; i++)
-				fctrl->per_frame[frame_offset].led_current_ma[i]
-					= flash_operation_info->
-					led_current_ma[i];
 			break;
 		}
 		default:
@@ -663,17 +741,18 @@
 		break;
 	}
 	case CAM_PKT_NOP_OPCODE: {
-		if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_CONFIG)) {
+			CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+				csl_packet->header.request_id);
+			goto update_req_mgr;
+		} else {
 			CAM_ERR(CAM_FLASH,
 				"Rxed Update packets without linking");
 			fctrl->per_frame[frame_offset].
 				cmn_attr.is_settings_valid = false;
 			return -EINVAL;
 		}
-
-		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
-			csl_packet->header.request_id);
-		goto update_req_mgr;
 	}
 	default:
 		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
@@ -795,15 +874,13 @@
 	if (fctrl->flash_state == CAM_FLASH_STATE_INIT)
 		return;
 
-	if (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
-		cam_flash_release_dev(fctrl);
-		return;
+	if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+		(fctrl->flash_state == CAM_FLASH_STATE_START)) {
+		rc = cam_flash_stop_dev(fctrl);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
 	}
 
-	rc = cam_flash_stop_dev(fctrl);
-	if (rc)
-		CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
-
 	rc = cam_flash_release_dev(fctrl);
 	if (rc)
 		CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index e00d4fd..eddbf97 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -131,7 +131,7 @@
 	}
 	case CAM_START_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
-		if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+		if (fctrl->flash_state != CAM_FLASH_STATE_CONFIG) {
 			CAM_WARN(CAM_FLASH,
 				"Cannot apply Start Dev: Prev state: %d",
 				fctrl->flash_state);
@@ -154,6 +154,7 @@
 		break;
 	}
 	case CAM_STOP_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_STOP_DEV ENTER");
 		if (fctrl->flash_state != CAM_FLASH_STATE_START) {
 			CAM_WARN(CAM_FLASH,
 				"Cannot apply Stop dev: Prev state is: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index a1f8f67..92726a9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -50,6 +50,7 @@
 enum cam_flash_state {
 	CAM_FLASH_STATE_INIT,
 	CAM_FLASH_STATE_ACQUIRE,
+	CAM_FLASH_STATE_CONFIG,
 	CAM_FLASH_STATE_START,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 4e4b112..76f5b46 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -35,6 +35,7 @@
 	power_info->power_setting[0].seq_type = SENSOR_VAF;
 	power_info->power_setting[0].seq_val = CAM_VAF;
 	power_info->power_setting[0].config_val = 1;
+	power_info->power_setting[0].delay = 2;
 
 	power_info->power_down_setting_size = 1;
 	power_info->power_down_setting =
@@ -112,9 +113,21 @@
 		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
 	power_info = &soc_private->power_info;
 
+	if ((power_info->power_setting == NULL) &&
+		(power_info->power_down_setting == NULL)) {
+		CAM_INFO(CAM_OIS,
+			"Using default power settings");
+		rc = cam_ois_construct_default_power_setting(power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS,
+				"Construct default ois power setting failed.");
+			return rc;
+		}
+	}
+
 	/* Parse and fill vreg params for power up settings */
 	rc = msm_camera_fill_vreg_params(
-		&o_ctrl->soc_info,
+		soc_info,
 		power_info->power_setting,
 		power_info->power_setting_size);
 	if (rc) {
@@ -125,12 +138,12 @@
 
 	/* Parse and fill vreg params for power down settings*/
 	rc = msm_camera_fill_vreg_params(
-		&o_ctrl->soc_info,
+		soc_info,
 		power_info->power_down_setting,
 		power_info->power_down_setting_size);
 	if (rc) {
 		CAM_ERR(CAM_OIS,
-			"failed to fill vreg params power down rc:%d", rc);
+			"failed to fill vreg params for power down rc:%d", rc);
 		return rc;
 	}
 
@@ -142,9 +155,6 @@
 		return rc;
 	}
 
-	/* VREG needs some delay to power up */
-	usleep_range(2000, 2050);
-
 	rc = camera_io_init(&o_ctrl->io_master_info);
 	if (rc)
 		CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc);
@@ -152,6 +162,12 @@
 	return rc;
 }
 
+/**
+ * cam_ois_power_down - power down OIS device
+ * @o_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
 static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
 {
 	int32_t                         rc = 0;
@@ -392,6 +408,9 @@
 static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
 {
 	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	uint32_t                        total_cmd_buf_in_bytes = 0;
+	struct common_header           *cmm_hdr = NULL;
 	uint64_t                        generic_ptr;
 	struct cam_control             *ioctl_ctrl = NULL;
 	struct cam_config_dev_cmd       dev_config;
@@ -402,6 +421,9 @@
 	struct cam_packet              *csl_packet = NULL;
 	size_t                          len_of_buff = 0;
 	uint32_t                       *offset = NULL, *cmd_buf;
+	struct cam_ois_soc_private     *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info = &soc_private->power_info;
 
 	ioctl_ctrl = (struct cam_control *)arg;
 	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
@@ -430,52 +452,94 @@
 		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 
-		if ((csl_packet->num_cmd_buf < 2) &&
-			(csl_packet->num_cmd_buf > 3)) {
-			CAM_ERR(CAM_OIS, "wrong cmd Buffer count: %d",
-				csl_packet->num_cmd_buf);
-			return -EINVAL;
-		}
+		/* Loop through multiple command buffers */
+		for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			if (!total_cmd_buf_in_bytes)
+				continue;
 
-		rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buff);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "Failed to get cpu buf");
-			return rc;
-		}
-
-		cmd_buf = (uint32_t *)generic_ptr;
-		cmd_buf += cmd_desc->offset / sizeof(uint32_t);
-		rc = cam_ois_slaveInfo_pkt_parser(o_ctrl, cmd_buf);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "Failed in parsing the pkt");
-			return rc;
-		}
-
-		cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
-
-		i2c_reg_settings = &(o_ctrl->i2c_init_data);
-		i2c_reg_settings->is_settings_valid = 1;
-		i2c_reg_settings->request_id = 0;
-		rc = cam_sensor_i2c_command_parser(i2c_reg_settings,
-			&cmd_desc[1], 1);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "OIS pkt parsing failed: %d",
-				rc);
-			return rc;
-		}
-
-		if (o_ctrl->is_ois_calib) {
-			i2c_reg_settings = &(o_ctrl->i2c_calib_data);
-			i2c_reg_settings->is_settings_valid = 1;
-			i2c_reg_settings->request_id = 0;
-			rc = cam_sensor_i2c_command_parser(i2c_reg_settings,
-				&cmd_desc[2], 1);
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				(uint64_t *)&generic_ptr, &len_of_buff);
 			if (rc < 0) {
-				CAM_ERR(CAM_OIS,
-					"OIS pkt parsing failed: %d", rc);
+				CAM_ERR(CAM_OIS, "Failed to get cpu buf");
 				return rc;
 			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_OIS, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc->offset / sizeof(uint32_t);
+			cmm_hdr = (struct common_header *)cmd_buf;
+
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				rc = cam_ois_slaveInfo_pkt_parser(
+					o_ctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+					"Failed in parsing slave info");
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_OIS,
+					"Received power settings buffer");
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					power_info);
+				if (rc) {
+					CAM_ERR(CAM_OIS,
+					"Failed: parse power settings");
+					return rc;
+				}
+				break;
+			default:
+			if (o_ctrl->i2c_init_data.is_settings_valid == 0) {
+				CAM_DBG(CAM_OIS,
+				"Received init settings");
+				i2c_reg_settings =
+					&(o_ctrl->i2c_init_data);
+				i2c_reg_settings->is_settings_valid = 1;
+				i2c_reg_settings->request_id = 0;
+				rc = cam_sensor_i2c_command_parser(
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+					"init parsing failed: %d", rc);
+					return rc;
+				}
+			} else if ((o_ctrl->is_ois_calib != 0) &&
+				(o_ctrl->i2c_calib_data.
+					is_settings_valid == 0)) {
+				CAM_DBG(CAM_OIS,
+					"Received calib settings");
+				i2c_reg_settings = &(o_ctrl->i2c_calib_data);
+				i2c_reg_settings->is_settings_valid = 1;
+				i2c_reg_settings->request_id = 0;
+				rc = cam_sensor_i2c_command_parser(
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+						"Calib parsing failed: %d", rc);
+					return rc;
+				}
+			}
+			break;
+			}
+		}
+
+		if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
+			rc = cam_ois_power_up(o_ctrl);
+			if (rc) {
+				CAM_ERR(CAM_OIS, " OIS Power up failed");
+				return rc;
+			}
+			o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
 		}
 
 		if (o_ctrl->ois_fw_flag) {
@@ -515,6 +579,13 @@
 		}
 		break;
 	case CAM_OIS_PACKET_OPCODE_OIS_CONTROL:
+		if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+				"Not in right state to control OIS: %d",
+				o_ctrl->cam_ois_state);
+			return rc;
+		}
 		offset = (uint32_t *)&csl_packet->payload;
 		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
@@ -555,12 +626,13 @@
 	if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
 		return;
 
-	if ((o_ctrl->cam_ois_state == CAM_OIS_START) ||
-		(o_ctrl->cam_ois_state == CAM_OIS_ACQUIRE)) {
+	if (o_ctrl->cam_ois_state >= CAM_OIS_CONFIG) {
 		rc = cam_ois_power_down(o_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "OIS Power down failed");
+	}
 
+	if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) {
 		rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "destroying the device hdl");
@@ -611,16 +683,10 @@
 			goto release_mutex;
 		}
 
-		rc = cam_ois_power_up(o_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_OIS, " OIS Power up failed");
-			goto release_mutex;
-		}
-
 		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
 		break;
 	case CAM_START_DEV:
-		if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+		if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_OIS,
 			"Not in right state for start : %d",
@@ -637,18 +703,19 @@
 		}
 		break;
 	case CAM_RELEASE_DEV:
-		if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+		if (o_ctrl->cam_ois_state == CAM_OIS_START) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_OIS,
-			"Not in right state for release : %d",
-			o_ctrl->cam_ois_state);
+				"Cant release ois: in start state");
 			goto release_mutex;
 		}
 
-		rc = cam_ois_power_down(o_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "OIS Power down failed");
-			goto release_mutex;
+		if (o_ctrl->cam_ois_state == CAM_OIS_CONFIG) {
+			rc = cam_ois_power_down(o_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_OIS, "OIS Power down failed");
+				goto release_mutex;
+			}
 		}
 
 		if (o_ctrl->bridge_intf.device_hdl == -1) {
@@ -673,7 +740,7 @@
 			"Not in right state for stop : %d",
 			o_ctrl->cam_ois_state);
 		}
-		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+		o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
 		break;
 	default:
 		CAM_ERR(CAM_OIS, "invalid opcode");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index 5e1d719..d742acf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -197,27 +197,19 @@
 	rc = cam_ois_driver_soc_init(o_ctrl);
 	if (rc) {
 		CAM_ERR(CAM_OIS, "failed: cam_sensor_parse_dt rc %d", rc);
-		goto octrl_free;
+		goto soc_free;
 	}
 
 	rc = cam_ois_init_subdev_param(o_ctrl);
 	if (rc)
-		goto octrl_free;
-
-	rc = cam_ois_construct_default_power_setting(
-		&soc_private->power_info);
-	if (rc < 0) {
-		CAM_ERR(CAM_OIS,
-			"Construct default ois power setting failed.");
-		goto unreg_subdev;
-	}
+		goto soc_free;
 
 	o_ctrl->cam_ois_state = CAM_OIS_INIT;
 
 	return rc;
 
-unreg_subdev:
-	cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
+soc_free:
+	kfree(soc_private);
 octrl_free:
 	kfree(o_ctrl);
 probe_failure:
@@ -285,6 +277,7 @@
 		goto free_cci_client;
 	}
 	o_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev  = &pdev->dev;
 
 	INIT_LIST_HEAD(&(o_ctrl->i2c_init_data.list_head));
 	INIT_LIST_HEAD(&(o_ctrl->i2c_calib_data.list_head));
@@ -307,14 +300,6 @@
 	}
 	o_ctrl->bridge_intf.device_hdl = -1;
 
-	rc = cam_ois_construct_default_power_setting(
-		&soc_private->power_info);
-	if (rc < 0) {
-		CAM_ERR(CAM_OIS,
-			"Construct default ois power setting failed.");
-		goto unreg_subdev;
-	}
-
 	platform_set_drvdata(pdev, o_ctrl);
 	v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index 80f1e84..3b7195e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -34,6 +34,7 @@
 enum cam_ois_state {
 	CAM_OIS_INIT,
 	CAM_OIS_ACQUIRE,
+	CAM_OIS_CONFIG,
 	CAM_OIS_START,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index ec37c84..9ce7a21 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -165,38 +165,41 @@
 	}
 
 	case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
-		if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
+			i2c_reg_settings =
+				&i2c_data->
+				per_frame[csl_packet->header.request_id %
+				MAX_PER_FRAME_ARRAY];
+			CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
+			if (i2c_reg_settings->is_settings_valid == 1) {
+				CAM_ERR(CAM_SENSOR,
+					"Already some pkt in offset req : %lld",
+					csl_packet->header.request_id);
+				rc = delete_request(i2c_reg_settings);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed in Deleting the err: %d", rc);
+					return rc;
+				}
+			}
+		} else {
 			CAM_ERR(CAM_SENSOR,
 				"Rxed Update packets without linking");
 			return -EINVAL;
 		}
-		i2c_reg_settings =
-			&i2c_data->
-			per_frame[csl_packet->header.request_id %
-			MAX_PER_FRAME_ARRAY];
-		CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
-			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
-		if (i2c_reg_settings->is_settings_valid == 1) {
-			CAM_ERR(CAM_SENSOR,
-				"Already some pkt in offset req : %lld",
-				csl_packet->header.request_id);
-			rc = delete_request(i2c_reg_settings);
-			if (rc < 0) {
-				CAM_ERR(CAM_SENSOR,
-					"Failed in Deleting the err: %d", rc);
-				return rc;
-			}
-		}
 	break;
 	}
 	case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
-		if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
+			cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+		} else {
 			CAM_ERR(CAM_SENSOR,
 				"Rxed Update packets without linking");
-			return -EINVAL;
+			rc = -EINVAL;
 		}
-
-		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
 		return rc;
 	}
 	default:
@@ -487,22 +490,18 @@
 
 	cam_sensor_release_resource(s_ctrl);
 
-	if ((s_ctrl->sensor_state == CAM_SENSOR_START) ||
-		(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+	if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE)
 		cam_sensor_power_down(s_ctrl);
-		rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
-		if (rc < 0)
-			CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
-		s_ctrl->bridge_intf.device_hdl = -1;
-		s_ctrl->bridge_intf.link_hdl = -1;
-		s_ctrl->bridge_intf.session_hdl = -1;
-		s_ctrl->sensor_state = CAM_SENSOR_PROBE;
-	}
 
-	if (s_ctrl->sensor_state == CAM_SENSOR_PROBE) {
-		kfree(power_info->power_setting);
-		kfree(power_info->power_down_setting);
-	}
+	rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.session_hdl = -1;
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
 
 	s_ctrl->sensor_state = CAM_SENSOR_INIT;
 }
@@ -625,7 +624,6 @@
 		rc = cam_sensor_power_up(s_ctrl);
 		if (rc < 0) {
 			CAM_ERR(CAM_SENSOR, "power up failed");
-			cam_sensor_power_down(s_ctrl);
 			kfree(pu);
 			kfree(pd);
 			goto release_mutex;
@@ -708,7 +706,8 @@
 	}
 		break;
 	case CAM_RELEASE_DEV: {
-		if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+		if ((s_ctrl->sensor_state < CAM_SENSOR_ACQUIRE) ||
+			(s_ctrl->sensor_state > CAM_SENSOR_CONFIG)) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_SENSOR,
 			"Not in right state to release : %d",
@@ -755,7 +754,7 @@
 		break;
 	}
 	case CAM_START_DEV: {
-		if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+		if (s_ctrl->sensor_state != CAM_SENSOR_CONFIG) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_SENSOR,
 			"Not in right state to start : %d",
@@ -837,6 +836,7 @@
 					"Fail in deleting the config settings");
 				goto release_mutex;
 			}
+			s_ctrl->sensor_state = CAM_SENSOR_CONFIG;
 			s_ctrl->i2c_data.config_settings.request_id = -1;
 		}
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index 8c49837..624ea29 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -51,8 +51,8 @@
 
 enum cam_sensor_state_t {
 	CAM_SENSOR_INIT,
-	CAM_SENSOR_PROBE,
 	CAM_SENSOR_ACQUIRE,
+	CAM_SENSOR_CONFIG,
 	CAM_SENSOR_START,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index 2e91efc..72ca737 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -50,11 +50,13 @@
 	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
 	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
 	CAMERA_SENSOR_CMD_TYPE_WAIT,
-	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO,
 	CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
 	CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
 	CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
 	CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
+	CAMERA_SENSOR_CMD_TYPE_RD_DATA,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE,
 	CAMERA_SENSOR_CMD_TYPE_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 535264d..82ba24f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -676,14 +676,12 @@
 			struct cam_cmd_power *pwr_cmd =
 				(struct cam_cmd_power *)ptr;
 
-			power_info->
-				power_setting_size +=
-				pwr_cmd->count;
+			power_info->power_setting_size += pwr_cmd->count;
 			scr = ptr + sizeof(struct cam_cmd_power);
 			tot_size = tot_size + sizeof(struct cam_cmd_power);
 
 			if (pwr_cmd->count == 0)
-				CAM_DBG(CAM_SENSOR, "Un expected Command");
+				CAM_WARN(CAM_SENSOR, "Un expected Command");
 
 			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
 				power_info->
@@ -979,7 +977,7 @@
 		GFP_KERNEL);
 	if (!*pgpio_num_info)
 		return -ENOMEM;
-	gpio_num_info =  *pgpio_num_info;
+	gpio_num_info = *pgpio_num_info;
 
 	rc = of_property_read_u32(of_node, "gpio-vana", &val);
 	if (rc != -EINVAL) {
@@ -1266,6 +1264,8 @@
 		CAM_ERR(CAM_SENSOR,
 			"Cannot set shared pin to active state");
 
+	CAM_DBG(CAM_SENSOR, "power setting size: %d", ctrl->power_setting_size);
+
 	for (index = 0; index < ctrl->power_setting_size; index++) {
 		CAM_DBG(CAM_SENSOR, "index: %d", index);
 		power_setting = &ctrl->power_setting[index];
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index ae9f74c..46e9d5d 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -254,6 +254,12 @@
 		signalable_row = sync_dev->sync_table + list_info->sync_obj;
 
 		spin_lock_bh(&sync_dev->row_spinlocks[list_info->sync_obj]);
+		if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
+			spin_unlock_bh(
+				&sync_dev->row_spinlocks[list_info->sync_obj]);
+			continue;
+		}
+
 		/* Dispatch kernel callbacks if any were registered earlier */
 		list_for_each_entry_safe(sync_cb,
 			temp_sync_cb, &signalable_row->callback_list, list) {
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 826253c..afac68d 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -306,6 +306,10 @@
 	row->state = CAM_SYNC_STATE_INVALID;
 	memset(row, 0, sizeof(*row));
 	clear_bit(idx, sync_dev->bitmap);
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->parents_list);
+	INIT_LIST_HEAD(&row->children_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
index c1fbb2a..1b5fd9f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -22,7 +22,7 @@
 		return -EINVAL;
 
 	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
-	writel_relaxed(data, addr);
+	writel_relaxed_no_log(data, addr);
 
 	return 0;
 }
@@ -35,7 +35,7 @@
 	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 	/* Ensure previous writes are done */
 	wmb();
-	writel_relaxed(data, addr);
+	writel_relaxed_no_log(data, addr);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index c85d255..8eef152 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -198,6 +198,7 @@
 	u32 offset;
 	u32 len;
 	enum sde_rot_regdump_access access;
+	u32 value;
 };
 
 struct sde_rot_lut_cfg {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index fb74dab..916f978 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -422,7 +422,12 @@
 		head = &regdump[i];
 
 		if (head->access == SDE_ROT_REGDUMP_WRITE) {
-			writel_relaxed(1, mdata->sde_io.base + head->offset);
+			if (head->len != 1) {
+				SDEROT_ERR("invalid write len %u\n", head->len);
+				continue;
+			}
+			writel_relaxed(head->value,
+					mdata->sde_io.base + head->offset);
 			/* Make sure write go through */
 			wmb();
 		} else {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c94830a..7c36934 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -538,13 +538,13 @@
 	 * REGDMA RAM should be dump at last.
 	 */
 	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
-		SDE_ROT_REGDUMP_WRITE },
+		SDE_ROT_REGDUMP_WRITE, 1 },
 	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
 		SDE_ROT_REGDUMP_READ },
 	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
 		SDE_ROT_REGDUMP_VBIF },
-	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 0,
-		SDE_ROT_REGDUMP_WRITE },
+	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
+		SDE_ROT_REGDUMP_WRITE, 0 },
 };
 
 struct sde_rot_cdp_params {
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index a1ae681..3f7e7bb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1732,7 +1732,7 @@
 		if (ptr_svc->svc.listener_id != lstnr) {
 			pr_warn("Service requested does not exist\n");
 			__qseecom_qseos_fail_return_resp_tz(data, resp,
-					&send_data_rsp, ptr_svc, lstnr);
+					&send_data_rsp, NULL, lstnr);
 			return -ERESTARTSYS;
 		}
 		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 7df312e..e20ddba 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2728,6 +2728,26 @@
 EXPORT_SYMBOL(ipa_start_gsi_channel);
 
 /**
+* ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode
+* @iface - type of vlan capable device
+* @res - query result: true for vlan mode, false for non vlan mode
+*
+* API must be called after ipa_is_ready() returns true, otherwise it will fail
+*
+* Returns: 0 on success, negative on failure
+*/
+int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_is_vlan_mode, iface, res);
+
+	return ret;
+
+}
+EXPORT_SYMBOL(ipa_is_vlan_mode);
+
+/**
  * ipa_get_version_string() - Get string representation of IPA version
  * @ver: IPA version
  *
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 0779f34..f3e62b8 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -420,6 +420,7 @@
 
 	int (*ipa_get_smmu_params)(struct ipa_smmu_in_params *in,
 		struct ipa_smmu_out_params *out);
+	int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
 };
 
 #ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index 2975192..eadd58b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -123,6 +124,7 @@
  * @ipa_rm_resource_name_prod: IPA resource manager producer resource
  * @ipa_rm_resource_name_cons: IPA resource manager consumer resource
  * @pm_hdl: handle for IPA PM
+ * @is_vlan_mode: does the driver need to work in VLAN mode?
  */
 struct ecm_ipa_dev {
 	struct net_device *net;
@@ -141,6 +143,7 @@
 	enum ipa_rm_resource_name ipa_rm_resource_name_prod;
 	enum ipa_rm_resource_name ipa_rm_resource_name_cons;
 	u32 pm_hdl;
+	bool is_vlan_mode;
 };
 
 static int ecm_ipa_open(struct net_device *net);
@@ -173,7 +176,8 @@
 	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
 static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
-static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+	bool is_vlan_mode);
 static int ecm_ipa_set_device_ethernet_addr
 	(u8 *dev_ethaddr, u8 device_ethaddr[]);
 static enum ecm_ipa_state ecm_ipa_next_state
@@ -283,6 +287,12 @@
 	}
 	ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
 
+	if (ipa_is_vlan_mode(IPA_VLAN_IF_ECM, &ecm_ipa_ctx->is_vlan_mode)) {
+		ECM_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n");
+		goto fail_get_vlan_mode;
+	}
+	ECM_IPA_DEBUG("is vlan mode %d\n", ecm_ipa_ctx->is_vlan_mode);
+
 	result = ecm_ipa_rules_cfg
 		(ecm_ipa_ctx, params->host_ethaddr, params->device_ethaddr);
 	if (result) {
@@ -319,8 +329,9 @@
 
 fail_register_netdev:
 	ecm_ipa_rules_destroy(ecm_ipa_ctx);
-fail_set_device_ethernet:
 fail_rules_cfg:
+fail_get_vlan_mode:
+fail_set_device_ethernet:
 	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
 fail_netdev_priv:
 	free_netdev(net);
@@ -450,7 +461,8 @@
 	}
 	ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
 
-	retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl);
+	retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl,
+		ecm_ipa_ctx->is_vlan_mode);
 	if (retval) {
 		ECM_IPA_ERROR("fail on ep cfg\n");
 		goto fail;
@@ -606,6 +618,10 @@
 		goto out;
 	}
 
+	if (ecm_ipa_ctx->is_vlan_mode)
+		if (unlikely(skb->protocol != ETH_P_8021Q))
+			ECM_IPA_DEBUG("ether_type != ETH_P_8021Q && vlan\n");
+
 	ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
 	if (ret) {
 		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
@@ -843,6 +859,41 @@
 	ECM_IPA_DEBUG("queue started\n");
 }
 
+static void ecm_ipa_prepare_header_insertion(
+	int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode)
+{
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *eth_vlan_hdr;
+
+	ECM_IPA_LOG_ENTRY();
+
+	add_hdr->is_partial = 0;
+	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
+	add_hdr->is_eth2_ofst_valid = true;
+	add_hdr->eth2_ofst = 0;
+
+	if (is_vlan_mode) {
+		eth_vlan_hdr = (struct vlan_ethhdr *)add_hdr->hdr;
+		memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN);
+		eth_vlan_hdr->h_vlan_encapsulated_proto =
+			htons(eth_type);
+		eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		add_hdr->hdr_len = VLAN_ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_802_1Q;
+	} else {
+		eth_hdr = (struct ethhdr *)add_hdr->hdr;
+		memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+		eth_hdr->h_proto = htons(eth_type);
+		add_hdr->hdr_len = ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
 /**
  * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties
  *				Headers will be committed to HW
@@ -859,8 +910,6 @@
 	struct ipa_ioc_add_hdr *hdrs;
 	struct ipa_hdr_add *ipv4_hdr;
 	struct ipa_hdr_add *ipv6_hdr;
-	struct ethhdr *eth_ipv4;
-	struct ethhdr *eth_ipv6;
 	int result = 0;
 
 	ECM_IPA_LOG_ENTRY();
@@ -871,28 +920,17 @@
 		result = -ENOMEM;
 		goto out;
 	}
+
 	ipv4_hdr = &hdrs->hdr[0];
-	eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr;
+	ecm_ipa_prepare_header_insertion(
+		ETH_P_IP, ECM_IPA_IPV4_HDR_NAME,
+		ipv4_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode);
+
 	ipv6_hdr = &hdrs->hdr[1];
-	eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr;
-	strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX);
-	memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN);
-	memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN);
-	eth_ipv4->h_proto = htons(ETH_P_IP);
-	ipv4_hdr->hdr_len = ETH_HLEN;
-	ipv4_hdr->is_partial = 0;
-	ipv4_hdr->is_eth2_ofst_valid = true;
-	ipv4_hdr->eth2_ofst = 0;
-	ipv4_hdr->type = IPA_HDR_L2_ETHERNET_II;
-	strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX);
-	memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN);
-	memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN);
-	eth_ipv6->h_proto = htons(ETH_P_IPV6);
-	ipv6_hdr->hdr_len = ETH_HLEN;
-	ipv6_hdr->is_partial = 0;
-	ipv6_hdr->is_eth2_ofst_valid = true;
-	ipv6_hdr->eth2_ofst = 0;
-	ipv6_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	ecm_ipa_prepare_header_insertion(
+		ETH_P_IPV6, ECM_IPA_IPV6_HDR_NAME,
+		ipv6_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode);
+
 	hdrs->commit = 1;
 	hdrs->num_hdrs = 2;
 	result = ipa_add_hdr(hdrs);
@@ -972,10 +1010,14 @@
 	struct ipa_rx_intf rx_properties = {0};
 	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
 	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
 	int result = 0;
 
 	ECM_IPA_LOG_ENTRY();
 
+	if (ecm_ipa_ctx->is_vlan_mode)
+		hdr_l2_type = IPA_HDR_L2_802_1Q;
+
 	tx_properties.prop = properties;
 	ipv4_property = &tx_properties.prop[0];
 	ipv4_property->ip = IPA_IP_v4;
@@ -983,11 +1025,11 @@
 	strlcpy
 		(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
 		IPA_RESOURCE_NAME_MAX);
-	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv4_property->hdr_l2_type = hdr_l2_type;
 	ipv6_property = &tx_properties.prop[1];
 	ipv6_property->ip = IPA_IP_v6;
 	ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
-	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property->hdr_l2_type = hdr_l2_type;
 	strlcpy
 		(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
 		IPA_RESOURCE_NAME_MAX);
@@ -998,12 +1040,12 @@
 	rx_ipv4_property->ip = IPA_IP_v4;
 	rx_ipv4_property->attrib.attrib_mask = 0;
 	rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
-	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv4_property->hdr_l2_type = hdr_l2_type;
 	rx_ipv6_property = &rx_properties.prop[1];
 	rx_ipv6_property->ip = IPA_IP_v6;
 	rx_ipv6_property->attrib.attrib_mask = 0;
 	rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
-	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
 	rx_properties.num_props = 2;
 
 	result = ipa_register_intf("ecm0", &tx_properties, &rx_properties);
@@ -1336,6 +1378,13 @@
 		goto fail_file;
 	}
 
+	file = debugfs_create_bool("is_vlan_mode", flags_read_only,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->is_vlan_mode);
+	if (!file) {
+		ECM_IPA_ERROR("could not create is_vlan_mode file\n");
+		goto fail_file;
+	}
+
 	ECM_IPA_DEBUG("debugfs entries were created\n");
 	ECM_IPA_LOG_EXIT();
 
@@ -1362,8 +1411,9 @@
 /**
  * ecm_ipa_ep_cfg() - configure the USB endpoints for ECM
  *
- *usb_to_ipa_hdl: handle received from ipa_connect
- *ipa_to_usb_hdl: handle received from ipa_connect
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ * @is_vlan_mode - should driver work in vlan mode?
  *
  * USB to IPA pipe:
  *  - No de-aggregation
@@ -1374,16 +1424,21 @@
  *  - No aggregation
  *  - Add Ethernet header
  */
-static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+	bool is_vlan_mode)
 {
 	int result = 0;
 	struct ipa_ep_cfg usb_to_ipa_ep_cfg;
 	struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+	uint8_t hdr_add = 0;
+
 
 	ECM_IPA_LOG_ENTRY();
+	if (is_vlan_mode)
+		hdr_add = VLAN_HLEN;
 	memset(&usb_to_ipa_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
 	usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
-	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add;
 	usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
 	usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
 	usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
@@ -1395,7 +1450,7 @@
 	}
 	memset(&ipa_to_usb_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
 	ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
-	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add;
 	ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
 	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
 	if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 90920d9..d274490 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -741,6 +741,10 @@
 		&ipa3_usb_ctx->ttype_ctx[ttype];
 	int result;
 
+	/* create PM resources for the first tethering protocol only */
+	if (ipa3_usb_ctx->num_init_prot > 0)
+		return 0;
+
 	memset(&ttype_ctx->pm_ctx.reg_params, 0,
 		sizeof(ttype_ctx->pm_ctx.reg_params));
 	ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index 2e87bd2..4958c69 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -12,6 +12,7 @@
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/debugfs.h>
 #include <linux/in.h>
 #include <linux/stddef.h>
@@ -162,6 +163,7 @@
  * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails
  * @state_lock: used to protect the state variable.
  * @pm_hdl: handle for IPA PM framework
+ * @is_vlan_mode: should driver work in vlan mode?
  */
 struct rndis_ipa_dev {
 	struct net_device *net;
@@ -191,6 +193,7 @@
 	struct delayed_work xmit_error_delayed_work;
 	spinlock_t state_lock; /* Spinlock for the state variable.*/
 	u32 pm_hdl;
+	bool is_vlan_mode;
 };
 
 /**
@@ -217,19 +220,20 @@
 static void rndis_ipa_tx_timeout(struct net_device *net);
 static int rndis_ipa_stop(struct net_device *net);
 static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx);
-static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb);
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
+	struct rndis_ipa_dev *rndis_ipa_ctx);
 static void rndis_ipa_xmit_error(struct sk_buff *skb);
 static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work);
 static void rndis_ipa_prepare_header_insertion
 	(int eth_type,
 	const char *hdr_name, struct ipa_hdr_add *add_hdr,
-	const void *dst_mac, const void *src_mac);
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode);
 static int rndis_ipa_hdrs_cfg
 	(struct rndis_ipa_dev *rndis_ipa_ctx,
 	const void *dst_mac, const void *src_mac);
 static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
 static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
-static int rndis_ipa_register_properties(char *netdev_name);
+static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode);
 static int rndis_ipa_deregister_properties(char *netdev_name);
 static void rndis_ipa_rm_notify
 	(void *user_data, enum ipa_rm_event event,
@@ -262,7 +266,8 @@
 	(u32 usb_to_ipa_hdl,
 	u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev,
 	u32 max_xfer_size_bytes_to_host, u32 mtu,
-	bool deaggr_enable);
+	bool deaggr_enable,
+	bool is_vlan_mode);
 static int rndis_ipa_set_device_ethernet_addr
 	(u8 *dev_ethaddr,
 	u8 device_ethaddr[]);
@@ -566,6 +571,14 @@
 	}
 	RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
 
+	if (ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS,
+		&rndis_ipa_ctx->is_vlan_mode)) {
+		RNDIS_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n");
+		goto fail_get_vlan_mode;
+	}
+
+	RNDIS_IPA_DEBUG("is_vlan_mode %d\n", rndis_ipa_ctx->is_vlan_mode);
+
 	result = rndis_ipa_hdrs_cfg
 			(rndis_ipa_ctx,
 			params->host_ethaddr,
@@ -576,7 +589,8 @@
 	}
 	RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n");
 
-	result = rndis_ipa_register_properties(net->name);
+	result = rndis_ipa_register_properties(net->name,
+		rndis_ipa_ctx->is_vlan_mode);
 	if (result) {
 		RNDIS_IPA_ERROR("fail on properties set\n");
 		goto fail_register_tx;
@@ -612,8 +626,9 @@
 	rndis_ipa_deregister_properties(net->name);
 fail_register_tx:
 	rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
-fail_set_device_ethernet:
 fail_hdrs_cfg:
+fail_get_vlan_mode:
+fail_set_device_ethernet:
 	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
 fail_netdev_priv:
 	free_netdev(net);
@@ -728,7 +743,8 @@
 		max_xfer_size_bytes_to_dev,
 		max_xfer_size_bytes_to_host,
 		rndis_ipa_ctx->net->mtu,
-		rndis_ipa_ctx->deaggregation_enable);
+		rndis_ipa_ctx->deaggregation_enable,
+		rndis_ipa_ctx->is_vlan_mode);
 	if (result) {
 		RNDIS_IPA_ERROR("fail on ep cfg\n");
 		goto fail;
@@ -910,7 +926,7 @@
 		goto out;
 	}
 
-	skb = rndis_encapsulate_skb(skb);
+	skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
 	trace_rndis_tx_dp(skb->protocol);
 	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
 	if (ret) {
@@ -1456,6 +1472,7 @@
  *  for IPA->USB pipe
  * src_mac: device MAC (Ethernet) address to be added to packets
  *  for IPA->USB pipe
+ * is_vlan_mode: should driver work in vlan mode?
  *
  * This function shall build the header-insertion block request for a
  * single Ethernet+RNDIS header)
@@ -1468,23 +1485,37 @@
 static void rndis_ipa_prepare_header_insertion(
 	int eth_type,
 	const char *hdr_name, struct ipa_hdr_add *add_hdr,
-	const void *dst_mac, const void *src_mac)
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode)
 {
 	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *eth_vlan_hdr;
 
 	add_hdr->hdr_len = sizeof(rndis_template_hdr);
 	add_hdr->is_partial = false;
 	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
 
 	memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr));
-	eth_hdr = (struct ethhdr *)(add_hdr->hdr + sizeof(rndis_template_hdr));
-	memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
-	memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
-	eth_hdr->h_proto = htons(eth_type);
-	add_hdr->hdr_len += ETH_HLEN;
 	add_hdr->is_eth2_ofst_valid = true;
 	add_hdr->eth2_ofst = sizeof(rndis_template_hdr);
-	add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+
+	if (is_vlan_mode) {
+		eth_vlan_hdr = (struct vlan_ethhdr *)(add_hdr->hdr +
+			sizeof(rndis_template_hdr));
+		memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN);
+		eth_vlan_hdr->h_vlan_encapsulated_proto = htons(eth_type);
+		eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		add_hdr->hdr_len += VLAN_ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_802_1Q;
+	} else {
+		eth_hdr = (struct ethhdr *)(add_hdr->hdr +
+			sizeof(rndis_template_hdr));
+		memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+		eth_hdr->h_proto = htons(eth_type);
+		add_hdr->hdr_len += ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	}
 }
 
 /**
@@ -1526,10 +1557,10 @@
 	ipv6_hdr = &hdrs->hdr[1];
 	rndis_ipa_prepare_header_insertion
 		(ETH_P_IP, IPV4_HDR_NAME,
-		ipv4_hdr, dst_mac, src_mac);
+		ipv4_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
 	rndis_ipa_prepare_header_insertion
 		(ETH_P_IPV6, IPV6_HDR_NAME,
-		ipv6_hdr, dst_mac, src_mac);
+		ipv6_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
 
 	hdrs->commit = 1;
 	hdrs->num_hdrs = 2;
@@ -1610,6 +1641,7 @@
  * rndis_ipa_register_properties() - set Tx/Rx properties needed
  *  by IPA configuration manager
  * @netdev_name: a string with the name of the network interface device
+ * @is_vlan_mode: should driver work in vlan mode?
  *
  * Register Tx/Rx properties to allow user space configuration (IPA
  * Configuration Manager):
@@ -1628,7 +1660,7 @@
  *   This rules shall be added based on the attribute mask supplied at
  *   this function, that is, always hit rule.
  */
-static int rndis_ipa_register_properties(char *netdev_name)
+static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode)
 {
 	struct ipa_tx_intf tx_properties = {0};
 	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
@@ -1638,10 +1670,14 @@
 	struct ipa_rx_intf rx_properties = {0};
 	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
 	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
 	int result = 0;
 
 	RNDIS_IPA_LOG_ENTRY();
 
+	if (is_vlan_mode)
+		hdr_l2_type = IPA_HDR_L2_802_1Q;
+
 	tx_properties.prop = properties;
 	ipv4_property = &tx_properties.prop[0];
 	ipv4_property->ip = IPA_IP_v4;
@@ -1649,14 +1685,14 @@
 	strlcpy
 		(ipv4_property->hdr_name, IPV4_HDR_NAME,
 		IPA_RESOURCE_NAME_MAX);
-	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv4_property->hdr_l2_type = hdr_l2_type;
 	ipv6_property = &tx_properties.prop[1];
 	ipv6_property->ip = IPA_IP_v6;
 	ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
 	strlcpy
 		(ipv6_property->hdr_name, IPV6_HDR_NAME,
 		IPA_RESOURCE_NAME_MAX);
-	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property->hdr_l2_type = hdr_l2_type;
 	tx_properties.num_props = 2;
 
 	rx_properties.prop = rx_ioc_properties;
@@ -1664,12 +1700,12 @@
 	rx_ipv4_property->ip = IPA_IP_v4;
 	rx_ipv4_property->attrib.attrib_mask = 0;
 	rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD;
-	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv4_property->hdr_l2_type = hdr_l2_type;
 	rx_ipv6_property = &rx_properties.prop[1];
 	rx_ipv6_property->ip = IPA_IP_v6;
 	rx_ipv6_property->attrib.attrib_mask = 0;
 	rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD;
-	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
 	rx_properties.num_props = 2;
 
 	result = ipa_register_intf("rndis0", &tx_properties, &rx_properties);
@@ -1948,12 +1984,14 @@
  * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
  *  an RNDIS header
  * @skb: packet to be encapsulated with the RNDIS header
+ * @rndis_ipa_ctx: main driver context
  *
  * Shall use a template header for RNDIS and update it with the given
  * skb values.
  * Ethernet is expected to be already encapsulate the packet.
  */
-static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb)
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
+	struct rndis_ipa_dev *rndis_ipa_ctx)
 {
 	struct rndis_pkt_hdr *rndis_hdr;
 	int payload_byte_len = skb->len;
@@ -1971,6 +2009,10 @@
 		skb = new_skb;
 	}
 
+	if (rndis_ipa_ctx->is_vlan_mode)
+		if (unlikely(skb->protocol != htons(ETH_P_8021Q)))
+			RNDIS_IPA_DEBUG("ether_type != ETH_P_8021Q && vlan\n");
+
 	/* make room at the head of the SKB to put the RNDIS header */
 	rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb,
 					sizeof(rndis_template_hdr));
@@ -2046,6 +2088,8 @@
  * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host
  *  expects to receive from the device. supplied on REMOTE_NDIS_INITIALIZE_MSG.
  * @mtu: the netdev MTU size, in bytes
+ * @deaggr_enable: should deaggregation be enabled?
+ * @is_vlan_mode: should driver work in vlan mode?
  *
  * USB to IPA pipe:
  *  - de-aggregation
@@ -2064,7 +2108,8 @@
 	u32 max_xfer_size_bytes_to_dev,
 	u32 max_xfer_size_bytes_to_host,
 	u32 mtu,
-	bool deaggr_enable)
+	bool deaggr_enable,
+	bool is_vlan_mode)
 {
 	int result;
 	struct ipa_ep_cfg *usb_to_ipa_ep_cfg;
@@ -2077,6 +2122,20 @@
 		RNDIS_IPA_DEBUG("deaggregation disabled\n");
 	}
 
+	if (is_vlan_mode) {
+		usb_to_ipa_ep_cfg->hdr.hdr_len =
+			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_len =
+			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = VLAN_ETH_HLEN;
+	} else {
+		usb_to_ipa_ep_cfg->hdr.hdr_len =
+			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_len =
+			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = ETH_HLEN;
+	}
+
 	usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev;
 	result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg);
 	if (result) {
@@ -2452,6 +2511,14 @@
 		goto fail_file;
 	}
 
+	file = debugfs_create_bool("is_vlan_mode", flags_read_only,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->is_vlan_mode);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create is_vlan_mode file\n");
+		goto fail_file;
+	}
+
 	RNDIS_IPA_DEBUG("debugfs entries were created\n");
 	RNDIS_IPA_LOG_EXIT();
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index cf96f1a..f994db5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4638,7 +4638,7 @@
 {
 	unsigned long missing;
 
-	char dbg_buff[16] = { 0 };
+	char dbg_buff[32] = { 0 };
 
 	if (sizeof(dbg_buff) < count + 1)
 		return -EFAULT;
@@ -4650,8 +4650,9 @@
 		return -EFAULT;
 	}
 
-	if (count > 0)
-		dbg_buff[count - 1] = '\0';
+	dbg_buff[count] = '\0';
+
+	IPADBG("user input string %s\n", dbg_buff);
 
 	/* Prevent consequent calls from trying to load the FW again. */
 	if (ipa3_is_ready())
@@ -4659,13 +4660,40 @@
 
 	/* Check MHI configuration on MDM devices */
 	if (!ipa3_is_msm_device()) {
+
+		if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) {
+			if (strnstr(dbg_buff, "eth", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] =
+				true;
+			if (strnstr(dbg_buff, "rndis", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] =
+				true;
+			if (strnstr(dbg_buff, "ecm", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] =
+				true;
+
+			/*
+			 * when vlan mode is passed to our dev we expect
+			 * another write
+			 */
+			return count;
+		}
+
+		/* trim ending newline character if any */
+		if (count && (dbg_buff[count - 1] == '\n'))
+			dbg_buff[count - 1] = '\0';
+
 		if (!strcasecmp(dbg_buff, "MHI")) {
 			ipa3_ctx->ipa_config_is_mhi = true;
 			pr_info(
-			"IPA is loading with MHI configuration\n");
-		} else {
+				"IPA is loading with MHI configuration\n");
+		} else if (!strcmp(dbg_buff, "1")) {
 			pr_info(
-			"IPA is loading with non MHI configuration\n");
+				"IPA is loading with non MHI configuration\n");
+		} else {
+			IPAERR("got invalid string %s not loading FW\n",
+				dbg_buff);
+			return count;
 		}
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index adbd7b8..d7d74a3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1373,6 +1373,7 @@
 	int num_ipa_cne_evt_req;
 	struct mutex ipa_cne_evt_lock;
 	bool use_ipa_pm;
+	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
 };
 
 struct ipa3_plat_drv_res {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 3bf0327..be342cb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1030,8 +1030,9 @@
 				client->state);
 			spin_unlock_irqrestore(&client->state_lock, flags);
 		} else if (client->state ==
-			IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
-			IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+				IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
+			client->state ==
+				IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
 			run_algorithm = true;
 			client->state = IPA_PM_DEACTIVATED;
 			IPA_PM_DBG_STATE(client->hdl, client->name,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 86d50f6..7421eb8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1583,7 +1583,6 @@
 		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
 		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
 		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
-		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
 		break;
 	case IPA_RM_RESOURCE_MHI_CONS:
 		clients->names[i++] = IPA_CLIENT_MHI_CONS;
@@ -4318,6 +4317,38 @@
 	ipa3_ctx->tag_process_before_gating = val;
 }
 
+/**
+ * ipa3_is_vlan_mode - check if a LAN driver should load in VLAN mode
+ * @iface - type of vlan capable device
+ * @res - query result: true for vlan mode, false for non vlan mode
+ *
+ * API must be called after ipa_is_ready() returns true, otherwise it will fail
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	if (!res) {
+		IPAERR("NULL out param\n");
+		return -EINVAL;
+	}
+
+	if (iface < 0 || iface >= IPA_VLAN_IF_MAX) {
+		IPAERR("invalid iface %d\n", iface);
+		return -EINVAL;
+	}
+
+	if (!ipa3_is_ready()) {
+		IPAERR("IPA is not ready yet\n");
+		return -ENODEV;
+	}
+
+	*res = ipa3_ctx->vlan_mode_iface[iface];
+
+	IPADBG("Driver %d vlan mode is %d\n", iface, *res);
+	return 0;
+}
+
 int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	struct ipa_api_controller *api_ctrl)
 {
@@ -4504,6 +4535,7 @@
 	api_ctrl->ipa_disable_wdi3_pipes = ipa3_disable_wdi3_pipes;
 	api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
 	api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
+	api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode;
 
 	return 0;
 }
@@ -4867,6 +4899,77 @@
 	}
 }
 
+static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa_mem_buffer mem;
+	int res = 0;
+	int i;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	memset(&mem, 0, sizeof(mem));
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
+		return res;
+	}
+
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
+		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
+			return res;
+
+		IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+		/* Send a 1B packet DMA_TASK to IPA and try again */
+		res = ipa3_inject_dma_task_for_gsi();
+		if (res) {
+			IPAERR("Failed to inject DMA TASk for GSI\n");
+			return res;
+		}
+
+		/* sleep for short period to flush IPA */
+		usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPAERR("Failed  to stop GSI channel with retries\n");
+	return -EFAULT;
+}
+
+/**
+ * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
+ * @chan_hdl: GSI channel handle
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. This function returns when the channel is in STOP state.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	int res;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	res = __ipa3_stop_gsi_channel(clnt_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+
 void ipa3_suspend_apps_pipes(bool suspend)
 {
 	struct ipa_ep_cfg_ctrl cfg;
@@ -4889,7 +4992,7 @@
 			ipa_ep_idx);
 		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 			if (suspend) {
-				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
 				if (res) {
 					IPAERR("failed to stop LAN channel\n");
 					ipa_assert();
@@ -4923,7 +5026,7 @@
 			ipa_ep_idx);
 		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 			if (suspend) {
-				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				res = __ipa3_stop_gsi_channel(ipa_ep_idx);
 				if (res) {
 					IPAERR("failed to stop WAN channel\n");
 					ipa_assert();
@@ -5013,73 +5116,6 @@
 	return 0;
 }
 
-/**
- * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
- * @chan_hdl: GSI channel handle
- *
- * This function implements the sequence to stop a GSI channel
- * in IPA. This function returns when the channel is is STOP state.
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa3_stop_gsi_channel(u32 clnt_hdl)
-{
-	struct ipa_mem_buffer mem;
-	int res = 0;
-	int i;
-	struct ipa3_ep_context *ep;
-
-	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
-		ipa3_ctx->ep[clnt_hdl].valid == 0) {
-		IPAERR("bad parm.\n");
-		return -EINVAL;
-	}
-
-	ep = &ipa3_ctx->ep[clnt_hdl];
-
-	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
-
-	memset(&mem, 0, sizeof(mem));
-
-	if (IPA_CLIENT_IS_PROD(ep->client)) {
-		IPADBG("Calling gsi_stop_channel ch:%lu\n",
-			ep->gsi_chan_hdl);
-		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
-			ep->gsi_chan_hdl, res);
-		goto end_sequence;
-	}
-
-	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
-		IPADBG("Calling gsi_stop_channel ch:%lu\n",
-			ep->gsi_chan_hdl);
-		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
-			ep->gsi_chan_hdl, res);
-		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
-			goto end_sequence;
-
-		IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
-		/* Send a 1B packet DMA_TASK to IPA and try again */
-		res = ipa3_inject_dma_task_for_gsi();
-		if (res) {
-			IPAERR("Failed to inject DMA TASk for GSI\n");
-			goto end_sequence;
-		}
-
-		/* sleep for short period to flush IPA */
-		usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
-			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
-	}
-
-	IPAERR("Failed  to stop GSI channel with retries\n");
-	res = -EFAULT;
-end_sequence:
-	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
-
-	return res;
-}
-
 static int ipa3_load_single_fw(const struct firmware *firmware,
 	const struct elf32_phdr *phdr)
 {
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 98861de..195799e 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -612,7 +612,8 @@
 		p_events[ev_ring_idx].rp =
 			(u32)event_ring_bufs[ev_ring_idx].phys_base;
 		p_events[ev_ring_idx].wp =
-			(u32)event_ring_bufs[ev_ring_idx].phys_base;
+			(u32)event_ring_bufs[ev_ring_idx].phys_base +
+			event_ring_bufs[ev_ring_idx].size - 16;
 	} else {
 		IPA_UT_LOG("Skip configuring event ring - already done\n");
 	}
@@ -3261,11 +3262,11 @@
 	IPA_UT_ADD_TEST(suspend_resume_with_open_aggr,
 		"several suspend/resume iterations with open aggregation frame",
 		ipa_mhi_test_in_loop_suspend_resume_aggr_open,
-		true, IPA_HW_v3_0, IPA_HW_MAX),
+		true, IPA_HW_v3_0, IPA_HW_v3_5_1),
 	IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr,
 		"several force suspend/resume iterations with open aggregation frame",
 		ipa_mhi_test_in_loop_force_suspend_resume_aggr_open,
-		true, IPA_HW_v3_0, IPA_HW_MAX),
+		true, IPA_HW_v3_0, IPA_HW_v3_5_1),
 	IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup,
 		"several suspend and host wakeup resume iterations",
 		ipa_mhi_test_in_loop_suspend_host_wakeup,
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 93121df..95e3782 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -321,10 +321,18 @@
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[pipe_connect->bam_type];
 	struct sps_mem_buffer *data_buf = &(pipe_connect->data_mem_buf);
 	struct sps_mem_buffer *desc_buf = &(pipe_connect->desc_mem_buf);
+	struct device *dev = &ctx->usb_bam_pdev->dev;
+	struct sg_table data_sgt, desc_sgt;
+	dma_addr_t data_iova, desc_iova;
+	u32 data_fifo_size;
 
 	pr_debug("%s: data_fifo size:%x desc_fifo_size:%x\n",
 				__func__, pipe_connect->data_fifo_size,
 				pipe_connect->desc_fifo_size);
+
+	if (dev->parent)
+		dev = dev->parent;
+
 	switch (pipe_connect->mem_type) {
 	case SPS_PIPE_MEM:
 		log_event_dbg("%s: USB BAM using SPS pipe memory\n", __func__);
@@ -366,7 +374,16 @@
 			ret = -ENOMEM;
 			goto err_exit;
 		}
+
 		memset_io(data_buf->base, 0, data_buf->size);
+		data_buf->iova = dma_map_resource(dev, data_buf->phys_base,
+					data_buf->size, DMA_BIDIRECTIONAL, 0);
+		if (dma_mapping_error(dev, data_buf->iova))
+			log_event_err("%s(): oci_mem: err mapping data_buf\n",
+								__func__);
+		log_event_dbg("%s: data_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+			__func__, dev_name(dev), data_buf->base,
+			(unsigned long)data_buf->phys_base, data_buf->iova);
 
 		desc_buf->phys_base = pipe_connect->desc_fifo_base_offset +
 				ctx->usb_bam_data->usb_bam_fifo_baseaddr;
@@ -380,6 +397,16 @@
 			goto err_exit;
 		}
 		memset_io(desc_buf->base, 0, desc_buf->size);
+		desc_buf->iova = dma_map_resource(dev, desc_buf->phys_base,
+					desc_buf->size,
+					DMA_BIDIRECTIONAL, 0);
+		if (dma_mapping_error(dev, desc_buf->iova))
+			log_event_err("%s(): oci_mem: err mapping desc_buf\n",
+								__func__);
+
+		log_event_dbg("%s: desc_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+			__func__, dev_name(dev), desc_buf->base,
+			(unsigned long)desc_buf->phys_base, desc_buf->iova);
 		break;
 	case SYSTEM_MEM:
 		log_event_dbg("%s: USB BAM using system memory\n", __func__);
@@ -391,56 +418,57 @@
 		}
 
 		/* BAM would use system memory, allocate FIFOs */
-		data_buf->size = pipe_connect->data_fifo_size;
+		data_fifo_size = data_buf->size = pipe_connect->data_fifo_size;
 		/* On platforms which use CI controller, USB HW can fetch
 		 * additional 128 bytes at the end of circular buffer when
 		 * AXI prefetch is enabled and hence requirement is to
 		 * allocate 512 bytes more than required length.
 		 */
 		if (pipe_connect->bam_type == CI_CTRL)
-			data_buf->base =
-				dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
-				(pipe_connect->data_fifo_size +
-					DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
-				&(data_buf->phys_base),
-				GFP_KERNEL);
-		else
-			data_buf->base =
-				dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
-				pipe_connect->data_fifo_size,
-				&(data_buf->phys_base),
-				GFP_KERNEL);
+			data_fifo_size += DATA_FIFO_EXTRA_MEM_ALLOC_SIZE;
+
+		data_buf->base = dma_alloc_attrs(dev, data_fifo_size,
+						&data_iova, GFP_KERNEL,
+						DMA_ATTR_FORCE_CONTIGUOUS);
 		if (!data_buf->base) {
-			log_event_err("%s: dma_alloc_coherent failed for data fifo\n",
+			log_event_err("%s: data_fifo: dma_alloc_attr failed\n",
 								__func__);
 			ret = -ENOMEM;
 			goto err_exit;
 		}
 		memset(data_buf->base, 0, pipe_connect->data_fifo_size);
 
+		data_buf->iova = data_iova;
+		dma_get_sgtable(dev, &data_sgt, data_buf->base, data_buf->iova,
+								data_fifo_size);
+		data_buf->phys_base = page_to_phys(sg_page(data_sgt.sgl));
+		sg_free_table(&data_sgt);
+		log_event_dbg("%s: data_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+			__func__, dev_name(dev), data_buf->base,
+			(unsigned long)data_buf->phys_base, data_buf->iova);
+
 		desc_buf->size = pipe_connect->desc_fifo_size;
-		desc_buf->base = dma_alloc_coherent(&ctx->usb_bam_pdev->dev,
-			pipe_connect->desc_fifo_size,
-			&(desc_buf->phys_base),
-			GFP_KERNEL);
+		desc_buf->base = dma_alloc_attrs(dev,
+				pipe_connect->desc_fifo_size,
+				&desc_iova, GFP_KERNEL,
+				DMA_ATTR_FORCE_CONTIGUOUS);
 		if (!desc_buf->base) {
-			log_event_err("%s: dma_alloc_coherent failed for desc fifo\n",
+			log_event_err("%s: desc_fifo: dma_alloc_attr failed\n",
 								__func__);
-			if (pipe_connect->bam_type == CI_CTRL)
-				dma_free_coherent(&ctx->usb_bam_pdev->dev,
-				(pipe_connect->data_fifo_size +
-					DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
-				data_buf->base,
-				data_buf->phys_base);
-			else
-				dma_free_coherent(&ctx->usb_bam_pdev->dev,
-				pipe_connect->data_fifo_size,
-				data_buf->base,
-				data_buf->phys_base);
+			dma_free_attrs(dev, data_fifo_size, data_buf->base,
+				data_buf->iova, DMA_ATTR_FORCE_CONTIGUOUS);
 			ret = -ENOMEM;
 			goto err_exit;
 		}
 		memset(desc_buf->base, 0, pipe_connect->desc_fifo_size);
+		desc_buf->iova = desc_iova;
+		dma_get_sgtable(dev, &desc_sgt, desc_buf->base, desc_buf->iova,
+								desc_buf->size);
+		desc_buf->phys_base = page_to_phys(sg_page(desc_sgt.sgl));
+		sg_free_table(&desc_sgt);
+		log_event_dbg("%s: desc_buf:%s virt:%pK, phys:%lx, iova:%lx\n",
+			__func__, dev_name(dev), desc_buf->base,
+			(unsigned long)desc_buf->phys_base, desc_buf->iova);
 		break;
 	default:
 		log_event_err("%s: invalid mem type\n", __func__);
@@ -476,35 +504,40 @@
 				&ctx->usb_bam_connections[idx];
 	struct sps_connect *sps_connection =
 				&ctx->usb_bam_sps.sps_connections[idx];
+	struct device *dev = &ctx->usb_bam_pdev->dev;
+	u32 data_fifo_size;
 
 	pr_debug("%s(): data size:%x desc size:%x\n",
 			__func__, sps_connection->data.size,
 			sps_connection->desc.size);
 
+	if (dev->parent)
+		dev = dev->parent;
+
 	switch (pipe_connect->mem_type) {
 	case SYSTEM_MEM:
 		log_event_dbg("%s: Freeing system memory used by PIPE\n",
 				__func__);
-		if (sps_connection->data.phys_base) {
+		if (sps_connection->data.iova) {
+			data_fifo_size = sps_connection->data.size;
 			if (cur_bam == CI_CTRL)
-				dma_free_coherent(&ctx->usb_bam_pdev->dev,
-					(sps_connection->data.size +
-						DATA_FIFO_EXTRA_MEM_ALLOC_SIZE),
+				data_fifo_size +=
+					DATA_FIFO_EXTRA_MEM_ALLOC_SIZE;
+
+			dma_free_attrs(dev, data_fifo_size,
 					sps_connection->data.base,
-					sps_connection->data.phys_base);
-			else
-				dma_free_coherent(&ctx->usb_bam_pdev->dev,
-					sps_connection->data.size,
-					sps_connection->data.base,
-					sps_connection->data.phys_base);
+					sps_connection->data.iova,
+					DMA_ATTR_FORCE_CONTIGUOUS);
+			sps_connection->data.iova = 0;
 			sps_connection->data.phys_base = 0;
 			pipe_connect->data_mem_buf.base = NULL;
 		}
-		if (sps_connection->desc.phys_base) {
-			dma_free_coherent(&ctx->usb_bam_pdev->dev,
-					sps_connection->desc.size,
+		if (sps_connection->desc.iova) {
+			dma_free_attrs(dev, sps_connection->desc.size,
 					sps_connection->desc.base,
-					sps_connection->desc.phys_base);
+					sps_connection->desc.iova,
+					DMA_ATTR_FORCE_CONTIGUOUS);
+			sps_connection->desc.iova = 0;
 			sps_connection->desc.phys_base = 0;
 			pipe_connect->desc_mem_buf.base = NULL;
 		}
@@ -512,11 +545,25 @@
 	case OCI_MEM:
 		log_event_dbg("Freeing oci memory used by BAM PIPE\n");
 		if (sps_connection->data.base) {
+			if (sps_connection->data.iova) {
+				dma_unmap_resource(dev,
+					sps_connection->data.iova,
+					sps_connection->data.size,
+					DMA_BIDIRECTIONAL, 0);
+				sps_connection->data.iova = 0;
+			}
 			iounmap(sps_connection->data.base);
 			sps_connection->data.base = NULL;
 			pipe_connect->data_mem_buf.base = NULL;
 		}
 		if (sps_connection->desc.base) {
+			if (sps_connection->desc.iova) {
+				dma_unmap_resource(dev,
+					sps_connection->desc.iova,
+					sps_connection->desc.size,
+					DMA_BIDIRECTIONAL, 0);
+				sps_connection->desc.iova = 0;
+			}
 			iounmap(sps_connection->desc.base);
 			sps_connection->desc.base = NULL;
 			pipe_connect->desc_mem_buf.base = NULL;
@@ -530,7 +577,8 @@
 	return 0;
 }
 
-static int connect_pipe(enum usb_ctrl cur_bam, u8 idx, u32 *usb_pipe_idx)
+static int connect_pipe(enum usb_ctrl cur_bam, u8 idx, u32 *usb_pipe_idx,
+							unsigned long iova)
 {
 	int ret;
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
@@ -575,9 +623,11 @@
 	if (dir == USB_TO_PEER_PERIPHERAL) {
 		sps_connection->mode = SPS_MODE_SRC;
 		*usb_pipe_idx = pipe_connect->src_pipe_index;
+		sps_connection->dest_iova = iova;
 	} else {
 		sps_connection->mode = SPS_MODE_DEST;
 		*usb_pipe_idx = pipe_connect->dst_pipe_index;
+		sps_connection->source_iova = iova;
 	}
 
 	sps_connection->data = *data_buf;
@@ -1059,7 +1109,34 @@
 	return 0;
 }
 
-int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
+int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+				phys_addr_t *p_addr, u32 *bam_size)
+{
+	int ret = 0;
+	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
+	struct usb_bam_pipe_connect *pipe_connect =
+					&ctx->usb_bam_connections[idx];
+	unsigned long peer_bam_handle;
+
+	ret = sps_phy2h(pipe_connect->dst_phy_addr, &peer_bam_handle);
+	if (ret) {
+		log_event_err("%s: sps_phy2h failed (src BAM) %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = sps_get_bam_addr(peer_bam_handle, p_addr, bam_size);
+	if (ret) {
+		log_event_err("%s: sps_get_bam_addr failed%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx,
+						unsigned long iova)
 {
 	int ret;
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
@@ -1110,7 +1187,7 @@
 	/* Set the BAM mode (host/device) according to connected pipe */
 	info[cur_bam].cur_bam_mode = pipe_connect->bam_mode;
 
-	ret = connect_pipe(cur_bam, idx, bam_pipe_idx);
+	ret = connect_pipe(cur_bam, idx, bam_pipe_idx, iova);
 	if (ret) {
 		log_event_err("%s: pipe connection[%d] failure\n",
 				__func__, idx);
@@ -3024,6 +3101,7 @@
 	struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
 	enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
 	struct sps_bam_props props;
+	struct device *dev;
 
 	memset(&props, 0, sizeof(props));
 
@@ -3059,8 +3137,16 @@
 		pr_debug("Register and enable HSUSB BAM\n");
 		props.options |= SPS_BAM_OPT_ENABLE_AT_BOOT;
 	}
-	ret = sps_register_bam_device(&props, &ctx->h_bam);
 
+	dev = &ctx->usb_bam_pdev->dev;
+	if (dev && dev->parent && !device_property_present(dev->parent,
+						"qcom,smmu-s1-bypass")) {
+		pr_info("%s: setting SPS_BAM_SMMU_EN flag with (%s)\n",
+						__func__, dev_name(dev));
+		props.options |= SPS_BAM_SMMU_EN;
+	}
+
+	ret = sps_register_bam_device(&props, &ctx->h_bam);
 	if (ret < 0) {
 		log_event_err("%s: register bam error %d\n", __func__, ret);
 		return -EFAULT;
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index bfc401a..5b31889 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -301,12 +301,29 @@
 			__raw_writel(0x7766550a, restart_reason);
 		} else if (!strncmp(cmd, "oem-", 4)) {
 			unsigned long code;
+			unsigned long reset_reason;
 			int ret;
 
 			ret = kstrtoul(cmd + 4, 16, &code);
-			if (!ret)
+			if (!ret) {
+				/* Bit-2 to bit-7 of SOFT_RB_SPARE for hard
+				 * reset reason:
+				 * Value 0 to 31 for common defined features
+				 * Value 32 to 63 for oem specific features
+				 */
+				reset_reason = code +
+						PON_RESTART_REASON_OEM_MIN;
+				if (reset_reason > PON_RESTART_REASON_OEM_MAX ||
+				   reset_reason < PON_RESTART_REASON_OEM_MIN) {
+					pr_err("Invalid oem reset reason: %lx\n",
+						reset_reason);
+				} else {
+					qpnp_pon_set_restart_reason(
+						reset_reason);
+				}
 				__raw_writel(0x6f656d00 | (code & 0xff),
 					     restart_reason);
+			}
 		} else if (!strncmp(cmd, "edl", 3)) {
 			enable_emergency_dload_mode();
 		} else {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a6bc1da..8060142 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6146,7 +6146,7 @@
 
 out:
 	ufshcd_scsi_unblock_requests(hba);
-	pm_runtime_put_sync(hba->dev);
+	pm_runtime_put(hba->dev);
 	return;
 }
 
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index ea7374f..187c80d 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -557,6 +557,12 @@
 	len = fifo_write_body(einfo, data, len, &write_index);
 	if (unlikely(len < 0))
 		return len;
+
+	/* All data writes need to be flushed to memory before the write index
+	 * is updated. This protects against a race condition where the remote
+	 * reads stale data because the write index was written before the data.
+	 */
+	wmb();
 	einfo->tx_ch_desc->write_index = write_index;
 	send_irq(einfo);
 
@@ -599,6 +605,11 @@
 	if (unlikely(len3 < 0))
 		return len3;
 
+	/* All data writes need to be flushed to memory before the write index
+	 * is updated. This protects against a race condition where the remote
+	 * reads stale data because the write index was written before the data.
+	 */
+	wmb();
 	einfo->tx_ch_desc->write_index = write_index;
 	send_irq(einfo);
 
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 5dd49e8..8a835f5 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -80,6 +80,7 @@
 	const struct llcc_slice_config *llcc_data_ptr;
 	struct llcc_slice_desc *desc;
 	struct platform_device *pdev;
+	u32 sz, count;
 
 	if (of_parse_phandle_with_args(dev->of_node, "cache-slices",
 				       "#cache-cells", n, &phargs)) {
@@ -100,14 +101,17 @@
 	}
 
 	llcc_data_ptr = drv->slice_data;
+	sz = drv->llcc_config_data_sz;
+	count = 0;
 
-	while (llcc_data_ptr) {
+	while (llcc_data_ptr && count < sz) {
 		if (llcc_data_ptr->usecase_id == phargs.args[0])
 			break;
 		llcc_data_ptr++;
+		count++;
 	}
 
-	if (llcc_data_ptr == NULL) {
+	if (llcc_data_ptr == NULL || count == sz) {
 		pr_err("can't find %d usecase id\n", phargs.args[0]);
 		return ERR_PTR(-ENODEV);
 	}
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 9c764aa..310a186 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -37,13 +37,12 @@
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
-		msleep(25); /* To be tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(true);
 		break;
 
 	case PM_POST_SUSPEND:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
-		msleep(25); /* To be tuned based on SMP2P latencies */
+		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(false);
 		break;
 	}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 720ac31..c1103c7 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1523,6 +1523,11 @@
 	}
 	buffer = dmabuf->priv;
 
+	if (!is_buffer_hlos_assigned(buffer)) {
+		pr_err("%s: cannot sync a secure dmabuf\n", __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
 	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
 			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
 	dma_buf_put(dmabuf);
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index c7b58ce..9d53391 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -88,6 +88,10 @@
 		.name	= ION_QSECOM_HEAP_NAME,
 	},
 	{
+		.id	= ION_QSECOM_TA_HEAP_ID,
+		.name	= ION_QSECOM_TA_HEAP_NAME,
+	},
+	{
 		.id	= ION_SPSS_HEAP_ID,
 		.name	= ION_SPSS_HEAP_NAME,
 	},
@@ -340,7 +344,7 @@
 	if (!ION_IS_CACHED(flags))
 		return 0;
 
-	if (flags & ION_FLAG_SECURE)
+	if (!is_buffer_hlos_assigned(ion_handle_buffer(handle)))
 		return 0;
 
 	table = ion_sg_table(client, handle);
@@ -675,6 +679,20 @@
 	return -EINVAL;
 }
 
+bool is_buffer_hlos_assigned(struct ion_buffer *buffer)
+{
+	bool is_hlos = false;
+
+	if (buffer->heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA &&
+	    (buffer->flags & ION_FLAG_CP_HLOS))
+		is_hlos = true;
+
+	if (get_secure_vmid(buffer->flags) <= 0)
+		is_hlos = true;
+
+	return is_hlos;
+}
+
 int get_vmid(unsigned long flags)
 {
 	int vmid;
@@ -751,9 +769,9 @@
 
 		down_read(&mm->mmap_sem);
 
-		start = (unsigned long)data.flush_data.vaddr;
-		end = (unsigned long)data.flush_data.vaddr
-			+ data.flush_data.length;
+		start = (unsigned long)data.flush_data.vaddr +
+			data.flush_data.offset;
+		end = start + data.flush_data.length;
 
 		if (check_vaddr_bounds(start, end)) {
 			pr_err("%s: virtual address %pK is out of bounds\n",
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
index 741d017..ad7b1c5 100644
--- a/drivers/staging/android/ion/msm/msm_ion.h
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -174,6 +174,8 @@
 		void *vaddr, unsigned int offset, unsigned long len,
 		unsigned int cmd);
 
+bool is_buffer_hlos_assigned(struct ion_buffer *buffer);
+
 #else
 static inline struct ion_client *msm_ion_client_create(const char *name)
 {
@@ -202,6 +204,10 @@
 	return -ENODEV;
 }
 
+static inline bool is_buffer_hlos_assigned(struct ion_buffer *buffer)
+{
+	return true;
+}
 #endif /* CONFIG_ION */
 
 #endif
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 8d67f76..4747949 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -221,6 +221,22 @@
 	return 0;
 }
 
+static int test_task_state(struct task_struct *p, int state)
+{
+	struct task_struct *t;
+
+	for_each_thread(p, t) {
+		task_lock(t);
+		if (t->state & state) {
+			task_unlock(t);
+			return 1;
+		}
+		task_unlock(t);
+	}
+
+	return 0;
+}
+
 static int test_task_lmk_waiting(struct task_struct *p)
 {
 	struct task_struct *t;
@@ -435,7 +451,7 @@
 	int other_free;
 	int other_file;
 
-	if (mutex_lock_interruptible(&scan_mutex) < 0)
+	if (!mutex_trylock(&scan_mutex))
 		return 0;
 
 	other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
@@ -495,8 +511,6 @@
 		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
 			if (test_task_lmk_waiting(tsk)) {
 				rcu_read_unlock();
-				/* give the system time to free up the memory */
-				msleep_interruptible(20);
 				mutex_unlock(&scan_mutex);
 				return 0;
 			}
@@ -533,6 +547,16 @@
 		long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
 		long free = other_free * (long)(PAGE_SIZE / 1024);
 
+		if (test_task_lmk_waiting(selected) &&
+		    (test_task_state(selected, TASK_UNINTERRUPTIBLE))) {
+			lowmem_print(2, "'%s' (%d) is already killed\n",
+				     selected->comm,
+				     selected->pid);
+			rcu_read_unlock();
+			mutex_unlock(&scan_mutex);
+			return 0;
+		}
+
 		task_lock(selected);
 		send_sig(SIGKILL, selected, 0);
 		if (selected->mm)
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 84598db..4f9dd73 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -33,6 +33,7 @@
 	ION_CP_MFC_HEAP_ID = 12,
 	ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
 	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_QSECOM_TA_HEAP_ID = 19,
 	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
 	ION_SYSTEM_CONTIG_HEAP_ID = 21,
 	ION_ADSP_HEAP_ID = 22,
@@ -130,6 +131,7 @@
 #define ION_PIL1_HEAP_NAME  "pil_1"
 #define ION_PIL2_HEAP_NAME  "pil_2"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_QSECOM_TA_HEAP_NAME	"qsecom_ta"
 #define ION_SECURE_HEAP_NAME	"secure_heap"
 #define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
 
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index acbd26b..27bf54b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -58,4 +58,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
-obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index 2e795b1..e1fc6b9 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -12,7 +12,9 @@
  */
 
 #include <asm/arch_timer.h>
+#include <linux/platform_device.h>
 #include "tsens.h"
+#include "tsens-mtc.h"
 
 /* debug defines */
 #define	TSENS_DBG_BUS_ID_0			0
@@ -42,6 +44,177 @@
 	int (*dbg_func)(struct tsens_device *, u32, u32, int *);
 };
 
+static ssize_t
+zonemask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Zone =%d th1=%d th2=%d\n", tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+}
+
+static ssize_t
+zonemask_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = sscanf(buf, "%d %d %d", &tmdev->mtcsys.zone_mtc,
+				&tmdev->mtcsys.th1, &tmdev->mtcsys.th2);
+
+	if (ret != TSENS_ZONEMASK_PARAMS) {
+		pr_err("Invalid command line arguments\n");
+		count = -EINVAL;
+	} else {
+		pr_debug("store zone_mtc=%d th1=%d th2=%d\n",
+				tmdev->mtcsys.zone_mtc,
+				tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+		ret = tsens_set_mtc_zone_sw_mask(tmdev->mtcsys.zone_mtc,
+					tmdev->mtcsys.th1, tmdev->mtcsys.th2);
+		if (ret < 0) {
+			pr_err("Invalid command line arguments\n");
+			count = -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t
+zonelog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zlog[TSENS_MTC_ZONE_LOG_SIZE];
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_log(tmdev->mtcsys.zone_log, zlog);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Log[0]=%d\nLog[1]=%d\nLog[2]=%d\nLog[3]=%d\nLog[4]=%d\nLog[5]=%d\n",
+			zlog[0], zlog[1], zlog[2], zlog[3], zlog[4], zlog[5]);
+}
+
+static ssize_t
+zonelog_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_log);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+zonehist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret, zhist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = tsens_get_mtc_zone_history(tmdev->mtcsys.zone_hist, zhist);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE,
+		"Cool = %d\nYellow = %d\nRed = %d\n",
+			zhist[0], zhist[1], zhist[2]);
+}
+
+static ssize_t
+zonehist_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	struct tsens_device *tmdev = NULL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	ret = kstrtou32(buf, 0, &tmdev->mtcsys.zone_hist);
+	if (ret < 0) {
+		pr_err("Invalid command line arguments\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static struct device_attribute tsens_mtc_dev_attr[] = {
+	__ATTR(zonemask, 0644, zonemask_show, zonemask_store),
+	__ATTR(zonelog, 0644, zonelog_show, zonelog_store),
+	__ATTR(zonehist, 0644, zonehist_show, zonehist_store),
+};
+
+static int tsens_dbg_mtc_data(struct tsens_device *data,
+					u32 id, u32 dbg_type, int *val)
+{
+	int result = 0, i;
+	struct tsens_device *tmdev = NULL;
+	struct device_attribute *attr_ptr = NULL;
+
+	attr_ptr = tsens_mtc_dev_attr;
+	tmdev = data;
+
+	for (i = 0; i < ARRAY_SIZE(tsens_mtc_dev_attr); i++) {
+		result = device_create_file(&tmdev->pdev->dev, &attr_ptr[i]);
+		if (result < 0)
+			goto error;
+	}
+
+	return result;
+
+error:
+	for (i--; i >= 0; i--)
+		device_remove_file(&tmdev->pdev->dev, &attr_ptr[i]);
+
+	return result;
+}
+
 static int tsens_dbg_log_temp_reads(struct tsens_device *data, u32 id,
 					u32 dbg_type, int *temp)
 {
@@ -206,6 +379,7 @@
 	[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
 			tsens_dbg_log_interrupt_timestamp},
 	[TSENS_DBG_LOG_BUS_ID_DATA] = {tsens_dbg_log_bus_id_data},
+	[TSENS_DBG_MTC_DATA] = {tsens_dbg_mtc_data},
 };
 
 int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
diff --git a/drivers/thermal/tsens-mtc.c b/drivers/thermal/tsens-mtc.c
new file mode 100644
index 0000000..529503f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.c
@@ -0,0 +1,195 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "tsens.h"
+#include "tsens-mtc.h"
+
+struct tsens_device *tsens_controller_is_present(void)
+{
+	struct tsens_device *tmdev_chip = NULL;
+
+	if (list_empty(&tsens_device_list)) {
+		pr_err("%s: TSENS controller not available\n", __func__);
+		return tmdev_chip;
+	}
+
+	list_for_each_entry(tmdev_chip, &tsens_device_list, list)
+		return tmdev_chip;
+
+	return tmdev_chip;
+}
+EXPORT_SYMBOL(tsens_controller_is_present);
+
+static int tsens_mtc_reset_history_counter(unsigned int zone)
+{
+	unsigned int reg_cntl, is_valid;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(tmdev->tsens_tm_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+		(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_RESET_HISTORY_MASK)
+				>> TSENS_RESET_HISTORY_SHIFT;
+	if (!is_valid) {
+		/*Enable the bit to reset counter*/
+		writel_relaxed(reg_cntl | (1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+		reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+		pr_debug("tsens : zone =%d reg=%x\n", zone, reg_cntl);
+	}
+
+	/* Disable the bit to start the counter */
+	writel_relaxed(reg_cntl & ~(1 << TSENS_RESET_HISTORY_SHIFT),
+				(sensor_addr + (zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+			(zone * TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens : zone =%d reg=%x\n", zone, reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_mtc_reset_history_counter);
+
+int tsens_set_mtc_zone_sw_mask(unsigned int zone, unsigned int th1_enable,
+				unsigned int th2_enable)
+{
+	unsigned int reg_cntl;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_SW_MASK_ADDR
+					(tmdev->tsens_tm_addr);
+
+	if (th1_enable && th2_enable)
+		writel_relaxed(TSENS_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && !th2_enable)
+		writel_relaxed(TSENS_MTC_DISABLE,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (th1_enable && !th2_enable)
+		writel_relaxed(TSENS_TH1_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	if (!th1_enable && th2_enable)
+		writel_relaxed(TSENS_TH2_MTC_IN_EFFECT,
+				(sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	pr_debug("tsens : zone =%d th1=%d th2=%d reg=%x\n",
+		zone, th1_enable, th2_enable, reg_cntl);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_set_mtc_zone_sw_mask);
+
+int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log)
+{
+	unsigned int i, reg_cntl, is_valid, log[TSENS_MTC_ZONE_LOG_SIZE];
+	int *zlog = (int *)zone_log;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_LOG(tmdev->tsens_tm_addr);
+
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+	is_valid = (reg_cntl & TSENS_LOGS_VALID_MASK)
+				>> TSENS_LOGS_VALID_SHIFT;
+	if (is_valid) {
+		log[0] = (reg_cntl & TSENS_LOGS_LATEST_MASK);
+		log[1] = (reg_cntl & TSENS_LOGS_LOG1_MASK)
+				>> TSENS_LOGS_LOG1_SHIFT;
+		log[2] = (reg_cntl & TSENS_LOGS_LOG2_MASK)
+				>> TSENS_LOGS_LOG2_SHIFT;
+		log[3] = (reg_cntl & TSENS_LOGS_LOG3_MASK)
+				>> TSENS_LOGS_LOG3_SHIFT;
+		log[4] = (reg_cntl & TSENS_LOGS_LOG4_MASK)
+				>> TSENS_LOGS_LOG4_SHIFT;
+		log[5] = (reg_cntl & TSENS_LOGS_LOG5_MASK)
+				>> TSENS_LOGS_LOG5_SHIFT;
+		for (i = 0; i < (TSENS_MTC_ZONE_LOG_SIZE); i++) {
+			*(zlog+i) = log[i];
+			pr_debug("Log[%d]=%d\n", i, log[i]);
+		}
+	} else {
+		pr_debug("tsens: Valid bit disabled\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_log);
+
+int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist)
+{
+	unsigned int i, reg_cntl, hist[TSENS_MTC_ZONE_HISTORY_SIZE];
+	int *zhist = (int *)zone_hist;
+	void __iomem *sensor_addr;
+	struct tsens_device *tmdev = NULL;
+
+	if (zone > TSENS_NUM_MTC_ZONES_SUPPORT)
+		return -EINVAL;
+
+	tmdev = tsens_controller_is_present();
+	if (!tmdev) {
+		pr_err("No TSENS controller present\n");
+		return -EPROBE_DEFER;
+	}
+
+	sensor_addr = TSENS_TM_MTC_ZONE0_HISTORY(tmdev->tsens_tm_addr);
+	reg_cntl = readl_relaxed((sensor_addr +
+				(zone * TSENS_SN_ADDR_OFFSET)));
+
+	hist[0] = (reg_cntl & TSENS_PS_COOL_CMD_MASK);
+	hist[1] = (reg_cntl & TSENS_PS_YELLOW_CMD_MASK)
+			>> TSENS_PS_YELLOW_CMD_SHIFT;
+	hist[2] = (reg_cntl & TSENS_PS_RED_CMD_MASK)
+			>> TSENS_PS_RED_CMD_SHIFT;
+	for (i = 0; i < (TSENS_MTC_ZONE_HISTORY_SIZE); i++) {
+		*(zhist+i) = hist[i];
+		pr_debug("tsens : %d\n", hist[i]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens_get_mtc_zone_history);
diff --git a/drivers/thermal/tsens-mtc.h b/drivers/thermal/tsens-mtc.h
new file mode 100644
index 0000000..979513f
--- /dev/null
+++ b/drivers/thermal/tsens-mtc.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOM_TSENS_MTC_H__
+#define __QCOM_TSENS_MTC_H__
+
+#define TSENS_NUM_MTC_ZONES_SUPPORT 3
+#define TSENS_TM_MTC_ZONE0_SW_MASK_ADDR(n)      ((n) + 0x140)
+#define TSENS_TM_MTC_ZONE0_LOG(n)               ((n) + 0x150)
+#define TSENS_TM_MTC_ZONE0_HISTORY(n)           ((n) + 0x160)
+#define TSENS_SN_ADDR_OFFSET             0x4
+#define TSENS_RESET_HISTORY_MASK        0x4
+#define TSENS_ZONEMASK_PARAMS           3
+#define TSENS_MTC_ZONE_LOG_SIZE         6
+#define TSENS_MTC_ZONE_HISTORY_SIZE     3
+
+#define TSENS_TH1_MTC_IN_EFFECT               BIT(0)
+#define TSENS_TH2_MTC_IN_EFFECT               BIT(1)
+#define TSENS_MTC_IN_EFFECT                     0x3
+#define TSENS_MTC_DISABLE                       0x0
+
+#define TSENS_LOGS_VALID_MASK      0x40000000
+#define TSENS_LOGS_VALID_SHIFT     30
+#define TSENS_LOGS_LATEST_MASK    0x0000001f
+#define TSENS_LOGS_LOG1_MASK      0x000003e0
+#define TSENS_LOGS_LOG2_MASK      0x00007c00
+#define TSENS_LOGS_LOG3_MASK      0x000f8000
+#define TSENS_LOGS_LOG4_MASK      0x01f00000
+#define TSENS_LOGS_LOG5_MASK      0x3e000000
+#define TSENS_LOGS_LOG1_SHIFT     5
+#define TSENS_LOGS_LOG2_SHIFT     10
+#define TSENS_LOGS_LOG3_SHIFT     15
+#define TSENS_LOGS_LOG4_SHIFT     20
+#define TSENS_LOGS_LOG5_SHIFT     25
+
+#define TSENS_PS_RED_CMD_MASK   0x3ff00000
+#define TSENS_PS_YELLOW_CMD_MASK        0x000ffc00
+#define TSENS_PS_COOL_CMD_MASK  0x000003ff
+#define TSENS_PS_YELLOW_CMD_SHIFT       0xa
+#define TSENS_PS_RED_CMD_SHIFT  0x14
+
+#define TSENS_RESET_HISTORY_SHIFT       2
+
+extern int tsens_get_mtc_zone_history(unsigned int zone, void *zone_hist);
+extern struct tsens_device *tsens_controller_is_present(void);
+extern int tsens_set_mtc_zone_sw_mask(unsigned int zone,
+			unsigned int th1_enable, unsigned int th2_enable);
+extern int tsens_get_mtc_zone_log(unsigned int zone, void *zone_log);
+
+#endif /* __QCOM_TSENS_MTC_H__ */
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index ec2d592..ae4741d 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -32,6 +32,7 @@
 	TSENS_DBG_LOG_TEMP_READS,
 	TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
 	TSENS_DBG_LOG_BUS_ID_DATA,
+	TSENS_DBG_MTC_DATA,
 	TSENS_DBG_LOG_MAX
 };
 
@@ -114,6 +115,15 @@
 	u32				cycle_compltn_monitor_mask;
 	bool				wd_bark;
 	u32				wd_bark_mask;
+	bool				mtc;
+};
+
+struct tsens_mtc_sysfs {
+	uint32_t	zone_log;
+	int			zone_mtc;
+	int			th1;
+	int			th2;
+	uint32_t	zone_hist;
 };
 
 struct tsens_device {
@@ -130,8 +140,10 @@
 	spinlock_t			tsens_upp_low_lock;
 	const struct tsens_data		*ctrl_data;
 	struct tsens_sensor		sensor[0];
+	struct tsens_mtc_sysfs	mtcsys;
 };
 
 extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern struct list_head tsens_device_list;
 
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index fd625ae..50c847f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -570,6 +570,11 @@
 	spin_lock_init(&tmdev->tsens_crit_lock);
 	spin_lock_init(&tmdev->tsens_upp_low_lock);
 
+	if (tmdev->ctrl_data->mtc) {
+		if (tmdev->ops->dbg)
+			tmdev->ops->dbg(tmdev, 0, TSENS_DBG_MTC_DATA, NULL);
+	}
+
 	return 0;
 }
 
@@ -628,6 +633,7 @@
 	.wd_bark			= false,
 	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= true,
 };
 
 const struct tsens_data data_tsens23xx = {
@@ -636,6 +642,7 @@
 	.wd_bark			= true,
 	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
 };
 
 const struct tsens_data data_tsens24xx = {
@@ -645,4 +652,5 @@
 	/* Enable Watchdog monitoring by unmasking */
 	.wd_bark_mask			= 0,
 	.ops				= &ops_tsens2xxx,
+	.mtc				= false,
 };
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 67a71ba..0ce23c3 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1286,6 +1286,8 @@
 
 	geni_se_rx_dma_unprep(msm_port->wrapper_dev, msm_port->rx_dma,
 			      DMA_RX_BUF_SIZE);
+	msm_port->rx_dma = (dma_addr_t)NULL;
+
 	rx_bytes = geni_read_reg_nolog(uport->membase, SE_DMA_RX_LEN_IN);
 	if (unlikely(!msm_port->rx_buf)) {
 		IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: NULL Rx_buf\n",
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 64ed834..719fcbf 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1173,6 +1173,9 @@
 	device_property_read_u32(dev, "snps,xhci-imod-value",
 			&dwc->xhci_imod_value);
 
+	dwc->core_id = -1;
+	device_property_read_u32(dev, "usb-core-id", &dwc->core_id);
+
 	dwc->usb3_lpm_capable = device_property_read_bool(dev,
 				"snps,usb3_lpm_capable");
 
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 68a40f9..a8400dd 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -954,6 +954,7 @@
  *			increments or 0 to disable.
  * @create_reg_debugfs: create debugfs entry to allow dwc3 register dump
  * @xhci_imod_value: imod value to use with xhci
+ * @core_id: usb core id to differentiate different controller
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
@@ -1150,6 +1151,7 @@
 	struct dwc3_gadget_events	dbg_gadget_events;
 	bool			create_reg_debugfs;
 	u32			xhci_imod_value;
+	int			core_id;
 };
 
 /* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
index 3860a1a..44c082a 100644
--- a/drivers/usb/dwc3/dbm.c
+++ b/drivers/usb/dwc3/dbm.c
@@ -450,7 +450,7 @@
 }
 
 
-int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, unsigned long addr,
 				u32 size, u8 dst_pipe_idx)
 {
 	u8 dbm_ep = dst_pipe_idx;
diff --git a/drivers/usb/dwc3/dbm.h b/drivers/usb/dwc3/dbm.h
index 260afc2..d8e1ce9 100644
--- a/drivers/usb/dwc3/dbm.h
+++ b/drivers/usb/dwc3/dbm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -63,7 +63,7 @@
 int dbm_get_num_of_eps_configured(struct dbm *dbm);
 int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
 				int size);
-int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, unsigned long addr,
 				u32 size, u8 dst_pipe_idx);
 void dbm_set_speed(struct dbm *dbm, bool speed);
 void dbm_enable(struct dbm *dbm);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 3b92db9..dadd61e 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -229,6 +229,7 @@
 	struct power_supply	*usb_psy;
 	struct work_struct	vbus_draw_work;
 	bool			in_host_mode;
+	bool			in_device_mode;
 	enum usb_device_speed	max_rh_port_speed;
 	unsigned int		tx_fifo_size;
 	bool			vbus_active;
@@ -461,7 +462,7 @@
  * @size - size of data fifo.
  *
  */
-int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr,
 			 u32 size, u8 dst_pipe_idx)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
@@ -2002,7 +2003,7 @@
 	unsigned long timeout;
 	u32 reg = 0;
 
-	if ((mdwc->in_host_mode || mdwc->vbus_active)
+	if ((mdwc->in_host_mode || mdwc->in_device_mode)
 			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
 		if (!atomic_read(&mdwc->in_p3)) {
 			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
@@ -2265,7 +2266,8 @@
 	clk_disable_unprepare(mdwc->xo_clk);
 
 	/* Perform controller power collapse */
-	if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
+	if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
+					mdwc->in_restart)) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
@@ -2307,7 +2309,7 @@
 	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
 	 * case of host bus suspend and device bus suspend.
 	 */
-	if (mdwc->vbus_active || mdwc->in_host_mode) {
+	if (mdwc->in_device_mode || mdwc->in_host_mode) {
 		if (mdwc->use_pdc_interrupts) {
 			enable_usb_pdc_interrupt(mdwc, true);
 		} else {
@@ -2320,6 +2322,7 @@
 	}
 
 	dev_info(mdwc->dev, "DWC3 in low power mode\n");
+	dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
 	mutex_unlock(&mdwc->suspend_resume_mutex);
 	return 0;
 }
@@ -3020,6 +3023,13 @@
 		return ret;
 	}
 	dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
+	ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
+			DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR, &atomic_ctx);
+	if (ret) {
+		dev_err(mdwc->dev, "set UPSTREAM_IOVA_ALLOCATOR failed(%d)\n",
+				ret);
+		goto release_mapping;
+	}
 
 	ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
 			&atomic_ctx);
@@ -3141,14 +3151,6 @@
 	if (!mdwc)
 		return -ENOMEM;
 
-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
-		dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
-		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
-			dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
-			return -EOPNOTSUPP;
-		}
-	}
-
 	platform_set_drvdata(pdev, mdwc);
 	mdwc->dev = &pdev->dev;
 
@@ -3341,6 +3343,15 @@
 	if (ret)
 		goto err;
 
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+		dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
+		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+			dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
+			ret = -EOPNOTSUPP;
+			goto uninit_iommu;
+		}
+	}
+
 	/* Assumes dwc3 is the first DT child of dwc3-msm */
 	dwc3_node = of_get_next_available_child(node, NULL);
 	if (!dwc3_node) {
@@ -3890,6 +3901,7 @@
 		dwc3_msm_block_reset(mdwc, false);
 
 		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+		mdwc->in_device_mode = true;
 		usb_gadget_vbus_connect(&dwc->gadget);
 #ifdef CONFIG_SMP
 		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
@@ -3908,6 +3920,7 @@
 		msm_dwc3_perf_vote_update(mdwc, false);
 		pm_qos_remove_request(&mdwc->pm_qos_req_dma);
 
+		mdwc->in_device_mode = false;
 		usb_gadget_vbus_disconnect(&dwc->gadget);
 		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
 		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
@@ -3944,6 +3957,7 @@
 	if (ret)
 		goto err;
 
+	dbg_event(0xFF, "USB_lpm_state", atomic_read(&dwc->in_lpm));
 	/*
 	 * stop host mode functionality performs autosuspend with mdwc
 	 * device, and it may take sometime to call PM runtime suspend.
@@ -3951,6 +3965,12 @@
 	 * suspend immediately to put USB controller and PHYs into suspend.
 	 */
 	ret = pm_runtime_suspend(mdwc->dev);
+	/*
+	 * If mdwc device is already suspended, pm_runtime_suspend() API
+	 * returns 1, which is not error. Overwrite with zero if it is.
+	 */
+	if (ret > 0)
+		ret = 0;
 	dbg_event(0xFF, "pm_runtime_sus", ret);
 
 	dwc->maximum_speed = usb_speed;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 3f79aa4..8b159c3 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -52,7 +52,7 @@
 	return irq;
 }
 
-#define NUMBER_OF_PROPS	4
+#define NUMBER_OF_PROPS	5
 int dwc3_host_init(struct dwc3 *dwc)
 {
 	struct property_entry	props[NUMBER_OF_PROPS];
@@ -62,6 +62,7 @@
 	struct platform_device	*dwc3_pdev = to_platform_device(dwc->dev);
 	int			prop_idx = 0;
 	struct property_entry	imod_prop;
+	struct property_entry	core_id_prop;
 
 	irq = dwc3_host_get_irq(dwc);
 	if (irq < 0)
@@ -112,6 +113,15 @@
 		props[prop_idx++] = imod_prop;
 	}
 
+	if (dwc->core_id >= 0) {
+		core_id_prop.name  = "usb-core-id";
+		core_id_prop.length  = sizeof(u32);
+		core_id_prop.is_string = false;
+		core_id_prop.is_array = false;
+		core_id_prop.value.u32_data = dwc->core_id;
+		props[prop_idx++] = core_id_prop;
+	}
+
 	/**
 	 * WORKAROUND: dwc3 revisions <=3.00a have a limitation
 	 * where Port Disable command doesn't work.
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b040fdd..31c1dd2b 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -182,6 +182,9 @@
 config USB_F_RNDIS
 	tristate
 
+config USB_F_QCRNDIS
+	tristate
+
 config USB_F_MASS_STORAGE
 	tristate
 
@@ -312,6 +315,14 @@
 	  On hardware that can't implement the full protocol,
 	  a simple CDC subset is used, placing fewer demands on USB.
 
+config USB_CONFIGFS_QCRNDIS
+	bool "QCRNDIS"
+	depends on USB_CONFIGFS
+	depends on RNDIS_IPA
+	depends on NET
+	select USB_U_ETHER
+	select USB_F_QCRNDIS
+
 config USB_CONFIGFS_RNDIS
 	bool "RNDIS"
 	depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 960c2cc..90c426b 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -64,3 +64,5 @@
 obj-$(CONFIG_USB_F_GSI)         += usb_f_gsi.o
 usb_f_qdss-y			:= f_qdss.o u_qdss.o
 obj-$(CONFIG_USB_F_QDSS)        += usb_f_qdss.o
+usb_f_qcrndis-y			:= f_qc_rndis.o rndis.o u_data_ipa.o
+obj-$(CONFIG_USB_F_QCRNDIS)	+= usb_f_qcrndis.o
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
new file mode 100644
index 0000000..a8e7092
--- /dev/null
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -0,0 +1,1580 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *			Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+#include "u_data_ipa.h"
+#include <linux/rndis_ipa.h>
+#include "configfs.h"
+
+/* Upper bound (bytes) on a single DL (device-to-host) aggregated transfer.
+ * Writable at runtime via /sys/module/.../parameters (mode 0644); a value of
+ * 0 means "use whatever the host negotiated" (see rndis_qc_command_complete).
+ */
+unsigned int rndis_dl_max_xfer_size = 9216;
+module_param(rndis_dl_max_xfer_size, uint, 0644);
+MODULE_PARM_DESC(rndis_dl_max_xfer_size,
+		"Max size of bus transfer to host");
+
+/* Character-device plumbing for the rndis control node; IDA hands out
+ * per-instance minor numbers. */
+static struct class *rndis_class;
+static dev_t rndis_dev;
+static DEFINE_IDA(chardev_ida);
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data.  The control model is built around
+ * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored).  RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface.  That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely.  Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ *   - Power management ... references data that's scattered around lots
+ *     of other documentation, which is incorrect/incomplete there too.
+ *
+ *   - There are various undocumented protocol requirements, like the need
+ *     to send garbage in some control-OUT messages.
+ *
+ *   - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+/* Per-function state for one QC RNDIS instance (currently at most one,
+ * see _rndis_qc).  Data path is owned by the IPA hardware accelerator via
+ * bam_port; only the control/notify path is serviced by this driver. */
+struct f_rndis_qc {
+	struct usb_function		func;
+	u8				ctrl_id, data_id;	/* interface numbers from usb_interface_id() */
+	u8				ethaddr[ETH_ALEN];	/* host-side MAC handed to the RNDIS core */
+	u32				vendorID;
+	u8				ul_max_pkt_per_xfer;	/* UL aggregation: max packets per USB transfer */
+	u8				pkt_alignment_factor;	/* log2 alignment requested from host (see bind) */
+	u32				max_pkt_size;
+	const char			*manufacturer;
+	struct rndis_params		*params;		/* handle from rndis_register() */
+	atomic_t			ioctl_excl;		/* single-open guards (see rndis_qc_lock) */
+	atomic_t			open_excl;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;		/* pending RESPONSE_AVAILABLE notifications */
+	struct gadget_ipa_port		bam_port;		/* in/out bulk eps wired to IPA */
+	struct cdev			cdev;
+	struct device			*dev;
+	u8				port_num;
+	u16				cdc_filter;		/* RNDIS packet filter; 0 = no data flow */
+	bool				net_ready_trigger;
+};
+
+/* File-scope state: rndis_lock serializes completion handlers against
+ * teardown of the single global instance (_rndis_qc). */
+static struct ipa_usb_init_params rndis_ipa_params;
+static spinlock_t rndis_lock;
+static bool rndis_ipa_supported;
+static void rndis_qc_open(struct f_rndis_qc *rndis);
+
+/* Map a generic usb_function back to its enclosing f_rndis_qc. */
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;	/* SS: 13 x 1024-byte packets per 125us uframe */
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;		/* HS: 13 x 512-byte packets per uframe */
+	else
+		return 19 * 64 * 1 * 1000 * 8;		/* FS: 19 x 64-byte packets per 1ms frame */
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+/* currently only one rndis instance is supported - port
+ * index 0.
+ */
+#define RNDIS_QC_NO_PORTS				1
+#define RNDIS_QC_ACTIVE_PORT				0
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER			15
+
+/* default pkt alignment factor */
+#define DEFAULT_PKT_ALIGNMENT_FACTOR			4
+
+/* ioctl interface exposed through the rndis chardev */
+#define RNDIS_QC_IOCTL_MAGIC		'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER   _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE	_IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
+
+
+/* interface descriptor: */
+
+/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows*/
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+	.bLength =		sizeof(rndis_qc_control_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bInterfaceSubClass =   0x01,
+	.bInterfaceProtocol =   0x03,
+	/* .iInterface = DYNAMIC */
+};
+
+/* CDC functional descriptors: ACM-flavoured veneer required by the RNDIS
+ * spec; hosts ignore most of their content. */
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+	.bLength =		sizeof(rndis_qc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+	.bLength =		sizeof(rndis_qc_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+	.bLength =		sizeof(rndis_qc_acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+	.bLength =		sizeof(rndis_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+	.bLength =		sizeof(rndis_qc_data_intf),
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+/*  Supports "Wireless" RNDIS; auto-detected by Windows */
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+	.bLength =		sizeof(rndis_qc_iad_descriptor),
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded; patched in bind() */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_WIRELESS_CONTROLLER,
+	.bFunctionSubClass =	0x01,
+	.bFunctionProtocol =	0x03,
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+/* Full-speed descriptor set; endpoint addresses filled in by
+ * usb_ep_autoconfig() in bind(). */
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	/* HS bInterval is in 125us units, exponent-encoded: +4 keeps the
+	 * same ~32ms polling period as the FS descriptor. */
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+/* NOTE(review): ss_intr_comp_desc and ss_bulk_comp_desc below are never
+ * placed in any descriptor table; they are only referenced via sizeof()
+ * when initializing the rndis_qc_ss_* companion descriptors.  Since all
+ * usb_ss_ep_comp_descriptor instances have the same size this is harmless,
+ * but the sizeof() operands should name the variable being initialized
+ * for clarity, and the unused templates could be dropped. */
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+	.bLength =		sizeof(ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof(ss_intr_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+	.bLength =		sizeof(ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof(ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+	&rndis_qc_string_table,
+	NULL,
+};
+
+/* The single supported instance; completion handlers reach it through this
+ * global under rndis_lock rather than through request context. */
+struct f_rndis_qc *_rndis_qc;
+
+/* Try-lock built on an atomic counter: succeeds (returns 0) only for the
+ * first caller; everyone else gets -EBUSY.  Used to enforce single-open
+ * semantics on the chardev (open_excl / ioctl_excl). */
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+	atomic_dec(excl);
+	return -EBUSY;
+}
+
+/* Release a lock taken with rndis_qc_lock(). */
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callback from the RNDIS core: a response is queued for the host.
+ * Send the RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ * notify_count tracks outstanding notifications; only the 1st increment
+ * actually queues a request — rndis_qc_response_complete() re-queues
+ * until the count drains to zero.  Runs in atomic context (GFP_ATOMIC).
+ */
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	__le32				*data = req->buf;
+	int				status;
+
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Endpoint not enabled: drop this notification, but undo the
+	 * increment above.  Returning with the count stuck at 1 would make
+	 * every future call hit the "!= 1" early-return and never queue a
+	 * notification again.
+	 */
+	if (!rndis->notify->driver_data) {
+		atomic_dec(&rndis->notify_count);
+		return;
+	}
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
+
+/*
+ * Completion handler for both the interrupt notification request and the
+ * ep0 GET_ENCAPSULATED_RESPONSE data stage.  All instance state is read
+ * from the global _rndis_qc under rndis_lock, since the function may be
+ * torn down concurrently with a completing request.
+ */
+static void rndis_qc_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis;
+	int				status = req->status;
+	struct usb_composite_dev	*cdev;
+	struct usb_ep *notify_ep;
+
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	if (!rndis->func.config || !rndis->func.config->cdev) {
+		pr_err("%s(): cdev or config is NULL.\n", __func__);
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	cdev = rndis->func.config->cdev;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		goto out;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		/* ep0 data-stage completions end here; only the interrupt
+		 * endpoint participates in the re-queue loop below. */
+		if (ep != rndis->notify)
+			goto out;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			goto out;
+		/* drop the lock across usb_ep_queue(); re-validate the
+		 * global instance afterwards since it may have gone away. */
+		notify_ep = rndis->notify;
+		spin_unlock(&rndis_lock);
+		status = usb_ep_queue(notify_ep, req, GFP_ATOMIC);
+		if (status) {
+			spin_lock(&rndis_lock);
+			if (!_rndis_qc)
+				goto out;
+			atomic_dec(&_rndis_qc->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+			spin_unlock(&rndis_lock);
+		}
+	}
+
+	return;
+
+out:
+	spin_unlock(&rndis_lock);
+}
+
+/*
+ * ep0 OUT data-stage completion for SEND_ENCAPSULATED_COMMAND: hand the
+ * raw RNDIS message to the core parser, and on RNDIS_MSG_INIT propagate
+ * the negotiated UL/DL aggregate transfer sizes to the IPA data path.
+ */
+static void rndis_qc_command_complete(struct usb_ep *ep,
+							struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis;
+	int				status;
+	rndis_init_msg_type		*buf;
+	u32		ul_max_xfer_size, dl_max_xfer_size;
+
+	if (req->status != 0) {
+		pr_err("%s: RNDIS command completion error %d\n",
+				__func__, req->status);
+		return;
+	}
+
+	/* validate the global instance under rndis_lock; the function may
+	 * be unbinding concurrently. */
+	spin_lock(&rndis_lock);
+	rndis = _rndis_qc;
+	if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
+		spin_unlock(&rndis_lock);
+		return;
+	}
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params);
+		ipa_data_set_ul_max_xfer_size(ul_max_xfer_size);
+		/*
+		 * For consistent data throughput from IPA, it is required to
+		 * fine tune aggregation byte limit as 7KB. RNDIS IPA driver
+		 * use provided this value to calculate aggregation byte limit
+		 * and program IPA hardware for aggregation.
+		 * Host provides 8KB or 16KB as Max Transfer size, hence select
+		 * minimum out of host provided value and optimum transfer size
+		 * to get 7KB as aggregation byte limit.
+		 */
+		if (rndis_dl_max_xfer_size)
+			dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size,
+				rndis_get_dl_max_xfer_size(rndis->params));
+		else
+			dl_max_xfer_size =
+				rndis_get_dl_max_xfer_size(rndis->params);
+		ipa_data_set_dl_max_xfer_size(dl_max_xfer_size);
+	}
+	spin_unlock(&rndis_lock);
+}
+
+/*
+ * Class-specific ep0 request handler for the RNDIS control interface.
+ * Handles only the two CDC encapsulated-command transfers the RNDIS RPC
+ * uses; everything else is rejected (returns -EOPNOTSUPP -> ep0 stall).
+ * Returns the data-stage length on success, negative errno to stall.
+ */
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	pr_debug("%s: Enter\n", __func__);
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->params, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->params, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->context = rndis;
+		/* short-write ZLP if our reply is shorter than requested */
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+/*
+ * Look up a network device by name (e.g. "rndis0") in init_net.
+ * Returns the net_device pointer, or ERR_PTR(-EINVAL) if not found.
+ *
+ * NOTE(review): the reference taken by dev_get_by_name() is dropped
+ * before returning, so the caller holds an unreferenced pointer.  This
+ * presumably relies on the rndis0 device outliving the gadget function
+ * (it is owned by the RNDIS IPA driver) — confirm against callers.
+ */
+struct net_device *rndis_qc_get_net(const char *netname)
+{
+	struct net_device *net_dev;
+
+	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Decrement net_dev refcount as it was incremented in
+	 * dev_get_by_name().
+	 */
+	dev_put(net_dev);
+	return net_dev;
+}
+
+/*
+ * SET_INTERFACE handler.  alt is always 0 (RNDIS has no altsettings).
+ * For the control interface: (re)enable the interrupt endpoint.
+ * For the data interface: tear down any previous IPA connection, configure
+ * the bulk endpoints for the current speed, and (re)connect the IPA
+ * accelerated data path.  Returns 0 on success, negative errno on failure.
+ */
+static int rndis_qc_set_alt(struct usb_function *f, unsigned int intf,
+			unsigned int alt)
+{
+	struct f_rndis_qc	 *rndis = func_to_rndis_qc(f);
+	struct f_rndis_qc_opts *opts;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	u8 src_connection_idx;
+	u8 dst_connection_idx;
+	/* usb_bam_get_connection_idx() results are captured in signed ints
+	 * first: the original code stored them straight into u8, which made
+	 * the "< 0" error check below always false and then returned an
+	 * uninitialized variable on the (unreachable) error path.
+	 */
+	int src_idx, dst_idx;
+	enum usb_ctrl usb_bam_type;
+
+	/* we know alt == 0 */
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		rndis->net_ready_trigger = false;
+		if (rndis->bam_port.in->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			/* bam_port is needed for disconnecting the BAM data
+			 * path. Only after the BAM data path is disconnected,
+			 * we can disconnect the port from the network layer.
+			 */
+			ipa_data_disconnect(&rndis->bam_port,
+						USB_IPA_FUNC_RNDIS);
+		}
+
+		if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->bam_port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->bam_port.out)) {
+				rndis->bam_port.in->desc = NULL;
+				rndis->bam_port.out->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->cdc_filter = 0;
+
+		rndis->bam_port.cdev = cdev;
+		rndis->bam_port.func = &rndis->func;
+		ipa_data_port_select(USB_IPA_FUNC_RNDIS);
+		usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name);
+
+		src_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
+			rndis->port_num);
+		dst_idx = usb_bam_get_connection_idx(usb_bam_type,
+			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
+			rndis->port_num);
+		if (src_idx < 0 || dst_idx < 0) {
+			pr_err("%s: usb_bam_get_connection_idx failed\n",
+				__func__);
+			return -EINVAL;
+		}
+		src_connection_idx = src_idx;
+		dst_connection_idx = dst_idx;
+		if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				src_connection_idx, dst_connection_idx))
+			goto fail;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		rndis_qc_open(rndis);
+		net = rndis_qc_get_net("rndis0");
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+		opts->net = net;
+
+		rndis_set_param_dev(rndis->params, net,
+				&rndis->cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Function disable (SET_CONFIGURATION 0 / reset): reset the RNDIS state
+ * machine, tear down the IPA data path and the notify endpoint.  No-op if
+ * the notify endpoint was never enabled.
+ */
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+
+	/* rndis_uninit() under rndis_lock so completion handlers see a
+	 * consistent state. */
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis_uninit(rndis->params);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS);
+
+	msm_ep_unconfig(rndis->bam_port.out);
+	msm_ep_unconfig(rndis->bam_port.in);
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*
+ * Bus suspend handler.  Remote wakeup capability is tracked per-function
+ * for SS (function wake) and per-gadget otherwise.  If the host cannot
+ * wake us, proactively flow-control the RNDIS data path before handing
+ * the IPA pipes over to suspend.
+ */
+static void rndis_qc_suspend(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	/* original format string ended in "\n:" — stray colon removed */
+	pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n",
+					__func__, remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		/* This is required as Linux host side RNDIS driver doesn't
+		 * send RNDIS_MESSAGE_PACKET_FILTER before suspending USB bus.
+		 * Hence we perform same operations explicitly here for Linux
+		 * host case. In case of windows, this RNDIS state machine is
+		 * already updated due to receiving of PACKET_FILTER.
+		 */
+		rndis_flow_control(rndis->params, true);
+		pr_debug("%s(): Disconnecting\n", __func__);
+	}
+
+	ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+			remote_wakeup_allowed);
+	pr_debug("rndis suspended\n");
+}
+
+/*
+ * Bus resume handler: mirror of rndis_qc_suspend().  Resumes the IPA
+ * pipes and, when remote wakeup was not allowed (so suspend had forced
+ * flow control), re-opens the RNDIS link and releases flow control.
+ */
+static void rndis_qc_resume(struct usb_function *f)
+{
+	struct f_rndis_qc	*rndis = func_to_rndis_qc(f);
+	bool remote_wakeup_allowed;
+
+	pr_debug("%s: rndis resumed\n", __func__);
+
+	/* Nothing to do if DATA interface wasn't initialized */
+	if (!rndis->bam_port.cdev) {
+		pr_debug("data interface was not up\n");
+		return;
+	}
+
+	if (f->config->cdev->gadget->speed == USB_SPEED_SUPER)
+		remote_wakeup_allowed = f->func_wakeup_allowed;
+	else
+		remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+
+	ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS,
+				remote_wakeup_allowed);
+
+	if (!remote_wakeup_allowed) {
+		rndis_qc_open(rndis);
+		/*
+		 * Linux Host doesn't sends RNDIS_MSG_INIT or non-zero value
+		 * set with RNDIS_MESSAGE_PACKET_FILTER after performing bus
+		 * resume. Hence trigger USB IPA transfer functionality
+		 * explicitly here. For Windows host case is also being
+		 * handle with RNDIS state machine.
+		 */
+		rndis_flow_control(rndis->params, false);
+	}
+
+	pr_debug("%s: RNDIS resume completed\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested.  A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+/*
+ * Signal link-up to the RNDIS core: report the medium and current link
+ * speed (in 100 bps units, per the RNDIS OID convention) and raise the
+ * media-connect indication.
+ */
+static void rndis_qc_open(struct f_rndis_qc *rndis)
+{
+	struct usb_composite_dev *cdev = rndis->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->params);
+}
+
+/* Flow-control callback handed to rndis_register(): start/stop the IPA
+ * RNDIS data path.  The rndis_params argument is unused here since only
+ * one port (USB_IPA_FUNC_RNDIS) is supported. */
+void ipa_data_flow_control_enable(bool enable, struct rndis_params *param)
+{
+	if (enable)
+		ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS);
+	else
+		ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct rndis_params		*params;
+	int			status;
+	struct usb_ep		*ep;
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+		return status;
+		rndis_qc_string_defs[0].id = status;
+		rndis_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[1].id = status;
+		rndis_qc_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[2].id = status;
+		rndis_qc_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->bam_port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	params = rndis_register(rndis_qc_response_available, rndis,
+			ipa_data_flow_control_enable);
+	if (params < 0)
+		goto fail;
+	rndis->params = params;
+
+	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->params, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+		rndis_set_param_vendor(rndis->params, rndis->vendorID,
+			rndis->manufacturer))
+		goto fail;
+
+	pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__,
+				rndis->ul_max_pkt_per_xfer);
+	rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer);
+
+	/* In case of aggregated packets QC device will request
+	 * aliment to 4 (2^2).
+	 */
+	pr_debug("%s(): pkt_alignment_factor:%d\n", __func__,
+				rndis->pkt_alignment_factor);
+	rndis_set_pkt_alignment_factor(rndis->params,
+				rndis->pkt_alignment_factor);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->bam_port.in->name, rndis->bam_port.out->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->bam_port.out->desc)
+		rndis->bam_port.out->driver_data = NULL;
+	if (rndis->bam_port.in->desc)
+		rndis->bam_port.in->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+/*
+ * rndis_qc_free() - usb_function .free_func callback
+ *
+ * Only drops the instance refcount taken in rndis_qc_bind_config_vendor();
+ * the f_rndis_qc object itself is owned by the function instance
+ * (opts->rndis) and is freed in qcrndis_free_inst().
+ */
+static void rndis_qc_free(struct usb_function *f)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
+	opts->refcnt--;
+}
+
+/*
+ * rndis_qc_unbind() - reverse rndis_qc_bind()
+ * @c: the configuration this function was bound to
+ * @f: the RNDIS function being unbound
+ *
+ * Frees the per-speed descriptor copies made at bind time, releases the
+ * notification request/buffer and tears down the RNDIS/IPA state.
+ */
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+
+	pr_debug("rndis_qc_unbind: free\n");
+	rndis_deregister(rndis->params);
+
+	/*
+	 * Free every descriptor set allocated in rndis_qc_bind(); the
+	 * super-speed set was previously leaked here.
+	 */
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	/*
+	 * call flush_workqueue to make sure that any pending
+	 * disconnect_work() from u_bam_data.c file is being
+	 * flushed before calling this rndis_ipa_cleanup API
+	 * as rndis ipa disconnect API is required to be
+	 * called before this.
+	 */
+	ipa_data_flush_workqueue();
+	rndis_ipa_cleanup(rndis_ipa_params.private);
+	rndis_ipa_supported = false;
+}
+
+/*
+ * rndis_ipa_reset_trigger() - clear the "network ready" flag when the
+ * IPA pipes are reset, so transfers are re-armed on the next notify.
+ *
+ * NOTE(review): _rndis_qc is read here without taking rndis_lock,
+ * unlike rndis_net_ready_notify() and the file operations — confirm
+ * callers cannot race with qcrndis_free_inst().
+ */
+void rndis_ipa_reset_trigger(void)
+{
+	struct f_rndis_qc *rndis;
+
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		return;
+	}
+
+	rndis->net_ready_trigger = false;
+}
+
+/*
+ * rndis_net_ready_notify() - callback from the RNDIS_IPA driver when the
+ * network interface is up and userspace is ready to answer DHCP requests.
+ *
+ * Marks the channel ready exactly once (under rndis_lock) and then kicks
+ * off the endless RX/TX transfers on the IPA data path.
+ */
+void rndis_net_ready_notify(void)
+{
+	struct f_rndis_qc *rndis;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	rndis = _rndis_qc;
+	if (!rndis) {
+		pr_err("%s: No RNDIS instance", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+	if (rndis->net_ready_trigger) {
+		/* already armed: a second notify must not restart transfers */
+		pr_err("%s: Already triggered", __func__);
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return;
+	}
+
+	pr_debug("%s: Set net_ready_trigger", __func__);
+	rndis->net_ready_trigger = true;
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS);
+}
+
+/**
+ * rndis_qc_bind_config_vendor - prepare the RNDIS function for binding
+ * @fi: function instance created by qcrndis_alloc_inst()
+ * @vendorID: unused here; vendor id is taken from @fi's opts
+ * @manufacturer: unused here; manufacturer is taken from @fi's opts
+ * @max_pkt_per_xfer: UL aggregation limit (0 selects the default)
+ * @pkt_alignment_factor: UL alignment factor (0 selects the default)
+ * Context: single threaded during gadget setup
+ *
+ * Generates the host/device MAC addresses, configures the RNDIS/IPA
+ * parameters and function callbacks, and initializes the IPA side.
+ *
+ * Returns the usb_function on success, else ERR_PTR(-errno).
+ */
+
+static struct
+usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
+				u32 vendorID, const char *manufacturer,
+				u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
+{
+	struct f_rndis_qc_opts *opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc	*rndis;
+	int		status;
+
+	opts->refcnt++;
+	rndis = opts->rndis;
+
+	rndis->vendorID = opts->vendor_id;
+	rndis->manufacturer = opts->manufacturer;
+	/* export host's Ethernet address in CDC format */
+	random_ether_addr(rndis_ipa_params.host_ethaddr);
+	random_ether_addr(rndis_ipa_params.device_ethaddr);
+	pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
+		rndis_ipa_params.host_ethaddr,
+		rndis_ipa_params.device_ethaddr);
+	rndis_ipa_supported = true;
+	ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
+	rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;
+
+	/* if max_pkt_per_xfer was not configured set to default value */
+	rndis->ul_max_pkt_per_xfer =
+			max_pkt_per_xfer ? max_pkt_per_xfer :
+			DEFAULT_MAX_PKT_PER_XFER;
+	ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);
+
+	/*
+	 * Check no RNDIS aggregation, and alignment if not mentioned,
+	 * use alignment factor as zero. If aggregated RNDIS data transfer,
+	 * max packet per transfer would be default if it is not set
+	 * explicitly, and same way use alignment factor as 2 by default.
+	 * This would eliminate need of writing to sysfs if default RNDIS
+	 * aggregation setting required. Writing to both sysfs entries,
+	 * those values will always override default values.
+	 */
+	if ((rndis->pkt_alignment_factor == 0) &&
+			(rndis->ul_max_pkt_per_xfer == 1))
+		rndis->pkt_alignment_factor = 0;
+	else
+		rndis->pkt_alignment_factor = pkt_alignment_factor ?
+				pkt_alignment_factor :
+				DEFAULT_PKT_ALIGNMENT_FACTOR;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->cdc_filter = 0;
+
+	rndis->func.name = "rndis";
+	rndis->func.strings = rndis_qc_strings;
+	/* descriptors are per-instance copies */
+	rndis->func.bind = rndis_qc_bind;
+	rndis->func.unbind = rndis_qc_unbind;
+	rndis->func.set_alt = rndis_qc_set_alt;
+	rndis->func.setup = rndis_qc_setup;
+	rndis->func.disable = rndis_qc_disable;
+	rndis->func.suspend = rndis_qc_suspend;
+	rndis->func.resume = rndis_qc_resume;
+	rndis->func.free_func = rndis_qc_free;
+
+	status = rndis_ipa_init(&rndis_ipa_params);
+	if (status) {
+		pr_err("%s: failed to init rndis_ipa\n", __func__);
+		goto fail;
+	}
+
+	_rndis_qc = rndis;
+
+	return &rndis->func;
+fail:
+	/*
+	 * Do NOT kfree(rndis) here: the object is owned by the function
+	 * instance (opts->rndis) and is freed in qcrndis_free_inst();
+	 * freeing it here as well caused a double free. Just drop the
+	 * reference taken above.
+	 */
+	opts->refcnt--;
+	return ERR_PTR(status);
+}
+
+/* .alloc_func hook: create the function with all-default parameters */
+static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
+{
+	return rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
+}
+
+/*
+ * open() for the android_rndis_qc char device.
+ *
+ * Single-open policy: open_excl guards against concurrent openers;
+ * the existence check and the claim both happen under rndis_lock.
+ */
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	pr_info("Open rndis QC driver\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not created yet\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+		pr_err("Already opened\n");
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	fp->private_data = _rndis_qc;
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	if (!ret)
+		pr_info("rndis QC file opened\n");
+
+	return ret;
+}
+
+/* release() for the android_rndis_qc char device: drop the open claim */
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	unsigned long flags;
+
+	pr_info("Close rndis QC file\n");
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		spin_unlock_irqrestore(&rndis_lock, flags);
+		return -ENODEV;
+	}
+	rndis_qc_unlock(&_rndis_qc->open_excl);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return 0;
+}
+
+/*
+ * ioctl() for the android_rndis_qc char device: reports the UL
+ * max-packets-per-transfer and max packet size to userspace.
+ *
+ * The values are snapshotted under rndis_lock, then the lock is dropped
+ * for the copy_to_user() calls (which may fault/sleep).
+ *
+ * NOTE(review): if _rndis_qc goes away while the lock is dropped, the
+ * second check jumps to fail with ioctl_excl still held — benign only
+ * because the device is being torn down at that point; confirm.
+ */
+static long rndis_qc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	u8 qc_max_pkt_per_xfer = 0;
+	u32 qc_max_pkt_size = 0;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rndis_lock, flags);
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
+	qc_max_pkt_size = _rndis_qc->max_pkt_size;
+
+	if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	spin_unlock_irqrestore(&rndis_lock, flags);
+
+	pr_info("Received command %d\n", cmd);
+
+	switch (cmd) {
+	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_per_xfer,
+					sizeof(qc_max_pkt_per_xfer));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_info("Sent UL max packets per xfer %d\n",
+				qc_max_pkt_per_xfer);
+		break;
+	case RNDIS_QC_GET_MAX_PKT_SIZE:
+		ret = copy_to_user((void __user *)arg,
+					&qc_max_pkt_size,
+					sizeof(qc_max_pkt_size));
+		if (ret) {
+			pr_err("copying to user space failed\n");
+			ret = -EFAULT;
+		}
+		pr_debug("Sent max packet size %d\n",
+				qc_max_pkt_size);
+		break;
+	default:
+		pr_err("Unsupported IOCTL\n");
+		ret = -EINVAL;
+	}
+
+	spin_lock_irqsave(&rndis_lock, flags);
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not present\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	rndis_qc_unlock(&_rndis_qc->ioctl_excl);
+
+fail:
+	spin_unlock_irqrestore(&rndis_lock, flags);
+	return ret;
+}
+
+/* File operations backing the android_rndis_qc char device */
+static const struct file_operations rndis_qc_fops = {
+	.owner = THIS_MODULE,
+	.open = rndis_qc_open_dev,
+	.release = rndis_qc_release_dev,
+	.unlocked_ioctl	= rndis_qc_ioctl,
+};
+
+/*
+ * qcrndis_free_inst() - tear down everything created by
+ * qcrndis_set_inst_name(): char device, device node, class, minor
+ * number, the IPA data port and the f_rndis_qc object itself.
+ *
+ * _rndis_qc is cleared under rndis_lock so concurrent char-device file
+ * operations observe the instance disappearing atomically.
+ */
+static void qcrndis_free_inst(struct usb_function_instance *f)
+{
+	struct f_rndis_qc_opts	*opts = container_of(f,
+				struct f_rndis_qc_opts, func_inst);
+	int minor = MINOR(opts->rndis->cdev.dev);
+	unsigned long flags;
+
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+	class_destroy(rndis_class);
+	cdev_del(&opts->rndis->cdev);
+	ida_simple_remove(&chardev_ida, minor);
+	unregister_chrdev_region(rndis_dev, 1);
+
+	ipa_data_free(USB_IPA_FUNC_RNDIS);
+	spin_lock_irqsave(&rndis_lock, flags);
+	kfree(opts->rndis);
+	_rndis_qc = NULL;
+	kfree(opts);
+	spin_unlock_irqrestore(&rndis_lock, flags);
+}
+
+/*
+ * qcrndis_set_inst_name() - allocate per-instance state and char device
+ * @fi: function instance being named via configfs
+ * @name: instance name written by userspace
+ *
+ * Allocates the f_rndis_qc object, creates the "android_rndis_qc" char
+ * device used by userspace ioctls, and sets up the IPA data path.
+ * Returns 0 on success or a negative errno; every partially-acquired
+ * resource (class, chrdev region, minor, device, cdev) is released on
+ * failure.
+ */
+static int qcrndis_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct f_rndis_qc_opts	*opts = container_of(fi,
+				struct f_rndis_qc_opts, func_inst);
+	struct f_rndis_qc	*rndis;
+	int name_len;
+	int ret, minor;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	pr_debug("initialize rndis QC instance\n");
+	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+	if (!rndis) {
+		pr_err("%s: fail allocate and initialize new instance\n",
+			   __func__);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&rndis_lock);
+	opts->rndis = rndis;
+
+	/* class_create() returns ERR_PTR on failure; check before use */
+	rndis_class = class_create(THIS_MODULE, "usbrndis");
+	if (IS_ERR(rndis_class)) {
+		ret = PTR_ERR(rndis_class);
+		pr_err("Fail to create usbrndis class\n");
+		goto fail_free_rndis;
+	}
+
+	ret = alloc_chrdev_region(&rndis_dev, 0, 1, "usb_rndis");
+	if (ret < 0) {
+		pr_err("Fail to allocate usb rndis char dev region\n");
+		goto fail_destroy_class;
+	}
+
+	/* get a minor number */
+	minor = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+			minor);
+		ret = -ENODEV;
+		goto fail_out_of_minors;
+	}
+	rndis->dev = device_create(rndis_class, NULL,
+			MKDEV(MAJOR(rndis_dev), minor),
+			rndis, "android_rndis_qc");
+	if (IS_ERR(rndis->dev)) {
+		ret = PTR_ERR(rndis->dev);
+		pr_err("%s: device_create failed for (%d)", __func__, ret);
+		goto fail_return_minor;
+	}
+	cdev_init(&rndis->cdev, &rndis_qc_fops);
+	ret = cdev_add(&rndis->cdev, MKDEV(MAJOR(rndis_dev), minor), 1);
+	if (ret < 0) {
+		pr_err("%s: cdev_add failed for %s (%d)", __func__,
+			name, ret);
+		goto fail_cdev_add;
+	}
+
+	ret = ipa_data_setup(USB_IPA_FUNC_RNDIS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		goto fail_data_setup;
+	}
+
+	return 0;
+fail_data_setup:
+	cdev_del(&rndis->cdev);
+fail_cdev_add:
+	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
+fail_return_minor:
+	ida_simple_remove(&chardev_ida, minor);
+fail_out_of_minors:
+	unregister_chrdev_region(rndis_dev, 1);
+fail_destroy_class:
+	class_destroy(rndis_class);
+fail_free_rndis:
+	kfree(rndis);
+	opts->rndis = NULL;
+	return ret;
+}
+
+/* Map a configfs item back to its f_rndis_qc_opts container */
+static inline
+struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_rndis_qc_opts,
+				func_inst.group);
+}
+
+/* configfs .release: drop the reference held on the function instance */
+static void qcrndis_attr_release(struct config_item *item)
+{
+	struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+/* configfs glue for the function's configfs directory */
+static struct configfs_item_operations qcrndis_item_ops = {
+	.release        = qcrndis_attr_release,
+};
+
+static struct config_item_type qcrndis_func_type = {
+	.ct_item_ops    = &qcrndis_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+/*
+ * qcrndis_alloc_inst() - allocate the configfs-driven function instance
+ *
+ * Returns the embedded usb_function_instance, or ERR_PTR(-ENOMEM).
+ * The matching cleanup is qcrndis_free_inst().
+ */
+static struct usb_function_instance *qcrndis_alloc_inst(void)
+{
+	struct f_rndis_qc_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = qcrndis_set_inst_name;
+	opts->func_inst.free_func_inst = qcrndis_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				&qcrndis_func_type);
+
+	return &opts->func_inst;
+}
+
+/* Accessors exposing the RNDIS/IPA channel parameters to the data layer */
+void *rndis_qc_get_ipa_rx_cb(void)
+{
+	return rndis_ipa_params.ipa_rx_notify;
+}
+
+void *rndis_qc_get_ipa_tx_cb(void)
+{
+	return rndis_ipa_params.ipa_tx_notify;
+}
+
+void *rndis_qc_get_ipa_priv(void)
+{
+	return rndis_ipa_params.private;
+}
+
+bool rndis_qc_get_skip_ep_config(void)
+{
+	return rndis_ipa_params.skip_ep_cfg;
+}
+
+DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
+
+/* Register the RNDIS (BAM/IPA) function with the gadget framework. */
+static int __init usb_qcrndis_init(void)
+{
+	int ret;
+
+	ret = usb_function_register(&rndis_bamusb_func);
+	if (ret)
+		pr_err("%s: failed to register rndis %d\n", __func__, ret);
+
+	return ret;
+}
+
+static void __exit usb_qcrndis_exit(void)
+{
+	usb_function_unregister(&rndis_bamusb_func);
+}
+
+module_init(usb_qcrndis_init);
+module_exit(usb_qcrndis_exit);
+MODULE_DESCRIPTION("USB RNDIS Function Driver");
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index 4ba2e9b..72edb90 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -31,6 +31,9 @@
 	u32 peer_pipe_idx;
 	unsigned long usb_bam_handle;
 	struct sps_mem_buffer *data_fifo;
+	unsigned long qdss_bam_iova;
+	phys_addr_t qdss_bam_phys;
+	u32 qdss_bam_size;
 };
 
 struct gqdss {
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ac2231a..5d8e6fa 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -596,6 +596,7 @@
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
 
+	params->ul_max_xfer_size = le32_to_cpu(resp->MaxTransferSize);
 	params->resp_avail(params->v);
 	return 0;
 }
@@ -1015,6 +1016,18 @@
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_medium);
 
+/* Return the downlink (device->host) max transfer size for @params */
+u32 rndis_get_dl_max_xfer_size(struct rndis_params *params)
+{
+	pr_debug("%s:\n", __func__);
+	return params->dl_max_xfer_size;
+}
+
+/* Return the uplink max transfer size captured from the host's INIT msg */
+u32 rndis_get_ul_max_xfer_size(struct rndis_params *params)
+{
+	pr_debug("%s:\n", __func__);
+	return params->ul_max_xfer_size;
+}
+
 void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
 {
 	pr_debug("%s:\n", __func__);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 4ffc282..a3051c4 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -194,6 +194,7 @@
 	u32			host_rndis_major_ver;
 	u32			host_rndis_minor_ver;
 	u32			dl_max_xfer_size;
+	u32			ul_max_xfer_size;
 	const char		*vendorDescr;
 	u8			pkt_alignment_factor;
 	void			(*resp_avail)(void *v);
@@ -216,6 +217,8 @@
 int  rndis_set_param_medium(struct rndis_params *params, u32 medium,
 			     u32 speed);
 void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
+u32  rndis_get_ul_max_xfer_size(struct rndis_params *params);
+u32  rndis_get_dl_max_xfer_size(struct rndis_params *params);
 void rndis_add_hdr(struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
 			struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
new file mode 100644
index 0000000..f379028
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -0,0 +1,1402 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/usb_bam.h>
+
+#include "u_data_ipa.h"
+#include "u_rmnet.h"
+
+/*
+ * Per-function USB<->IPA data channel state. One instance exists per
+ * ipa_func_type slot in ipa_data_ports[]; port_lock guards all mutable
+ * fields, and the work items run on ipa_data_wq.
+ */
+struct ipa_data_ch_info {
+	struct usb_request			*rx_req;
+	struct usb_request			*tx_req;
+	unsigned long				flags;
+	unsigned int				id;
+	enum ipa_func_type			func_type;
+	bool					is_connected;
+	unsigned int				port_num;
+	spinlock_t				port_lock;
+
+	struct work_struct			connect_w;
+	struct work_struct			disconnect_w;
+	struct work_struct			suspend_w;
+	struct work_struct			resume_w;
+
+	u32					src_pipe_idx;
+	u32					dst_pipe_idx;
+	u8					src_connection_idx;
+	u8					dst_connection_idx;
+	enum usb_ctrl				usb_bam_type;
+	struct gadget_ipa_port			*port_usb;
+	struct usb_gadget			*gadget;
+	atomic_t				pipe_connect_notified;
+	struct usb_bam_connect_ipa_params	ipa_params;
+};
+
+/* RNDIS-specific aggregation parameters and IPA client handles */
+struct rndis_data_ch_info {
+	/* this provides downlink (device->host i.e host) side configuration*/
+	u32	dl_max_transfer_size;
+	/* this provides uplink (host->device i.e device) side configuration */
+	u32	ul_max_transfer_size;
+	u32	ul_max_packets_number;
+	bool	ul_aggregation_enable;
+	u32	prod_clnt_hdl;
+	u32	cons_clnt_hdl;
+	void	*priv;
+};
+
+static struct workqueue_struct *ipa_data_wq;
+struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
+static struct rndis_data_ch_info *rndis_data;
+/**
+ * ipa_data_endless_complete() - completion callback for endless TX/RX request
+ * @ep: USB endpoint for which this completion happen
+ * @req: USB endless request
+ *
+ * This completion is being called when endless (TX/RX) transfer is terminated
+ * i.e. disconnect or suspend case. Intentionally does not resubmit the
+ * request; it only logs the terminating status.
+ */
+static void ipa_data_endless_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	pr_debug("%s: endless complete for(%s) with status: %d\n",
+				__func__, ep->name, req->status);
+}
+
+/**
+ * ipa_data_start_endless_xfer() - configure USB endpoint and
+ * queue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX)
+ *
+ * It is being used to queue endless TX/RX request with UDC driver.
+ * It does set required DBM endpoint configuration before queueing endless
+ * TX/RX request.
+ */
+static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	unsigned long flags;
+	int status;
+	struct usb_ep *ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+
+	if (in)
+		ep = port->port_usb->in;
+	else
+		ep = port->port_usb->out;
+
+	/* drop the lock before usb_ep_queue(); only the ep pointer is used */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__);
+		status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC);
+		if (status)
+			pr_err("error enqueuing endless TX_REQ, %d\n", status);
+	} else {
+		pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__);
+		status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC);
+		if (status)
+			pr_err("error enqueuing endless RX_REQ, %d\n", status);
+	}
+}
+
+/**
+ * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request
+ * @port: USB IPA data channel information
+ * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX
+ *
+ * It is being used to terminate and dequeue endless TX/RX request with UDC
+ * driver.
+ */
+static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in)
+{
+	unsigned long flags;
+	int status;
+	struct usb_ep *ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || (in && !port->tx_req)
+				|| (!in && !port->rx_req)) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): port_usb/req is NULL.\n", __func__);
+		return;
+	}
+
+	if (in)
+		ep = port->port_usb->in;
+	else
+		ep = port->port_usb->out;
+
+	/* drop the lock before usb_ep_dequeue(); only the ep pointer is used */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (in) {
+		pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__);
+		status = usb_ep_dequeue(ep, port->tx_req);
+		if (status)
+			pr_err("error dequeueing endless TX_REQ, %d\n", status);
+	} else {
+		pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__);
+		status = usb_ep_dequeue(ep, port->rx_req);
+		if (status)
+			pr_err("error dequeueing endless RX_REQ, %d\n", status);
+	}
+}
+
+/*
+ * Called when IPA triggers us that the network interface is up.
+ *  Starts the transfers on bulk endpoints.
+ * (optimization reasons, the pipes and bam with IPA are already connected)
+ */
+void ipa_data_start_rx_tx(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info	*port;
+	unsigned long flags;
+	struct usb_ep *epin, *epout;
+
+	pr_debug("%s: Triggered: starting tx, rx", __func__);
+	/* queue in & out requests */
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL, can't start tx, rx", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* all preconditions are validated under the lock before queuing */
+	if (!port->port_usb || !port->port_usb->in ||
+		!port->port_usb->out) {
+		pr_err("%s: Can't start tx, rx, ep not enabled", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	if (!port->rx_req || !port->tx_req) {
+		pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__,
+			port->rx_req, port->tx_req);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	if (!port->is_connected) {
+		pr_debug("%s: pipes are disconnected", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	epout = port->port_usb->out;
+	epin = port->port_usb->in;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* queue in & out requests */
+	pr_debug("%s: Starting rx", __func__);
+	if (epout)
+		ipa_data_start_endless_xfer(port, false);
+
+	pr_debug("%s: Starting tx", __func__);
+	if (epin)
+		ipa_data_start_endless_xfer(port, true);
+}
+/**
+ * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
+ * @w: disconnect work
+ *
+ * It is being schedule from ipa_data_disconnect() API when particular function
+ * is being disable due to USB disconnect or USB composition switch is being
+ * trigger . This API performs disconnect of USB BAM pipe, IPA BAM pipe and also
+ * initiate USB IPA BAM pipe handshake for USB Disconnect sequence. Due to
+ * handshake operation and involvement of SPS related APIs, this functioality
+ * can't be used from atomic context.
+ */
+static void ipa_data_disconnect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								disconnect_w);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->is_connected) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_debug("Already disconnected.\n");
+		return;
+	}
+	port->is_connected = false;
+	pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
+			port->ipa_params.prod_clnt_hdl,
+			port->ipa_params.cons_clnt_hdl);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+	if (ret)
+		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		/*
+		 * NOTE: it is required to disconnect USB and IPA BAM related
+		 * pipes before calling IPA tethered function related disconnect
+		 * API. IPA tethered function related disconnect API delete
+		 * depedency graph with IPA RM which would results into IPA not
+		 * pulling data although there is pending data on USB BAM
+		 * producer pipe.
+		 */
+		/* atomic_xchg ensures the notify callback fires at most once */
+		if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
+			void *priv;
+
+			priv = rndis_qc_get_ipa_priv();
+			rndis_ipa_pipe_disconnect_notify(priv);
+		}
+	}
+
+	if (port->ipa_params.prod_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	if (port->ipa_params.cons_clnt_hdl)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+	/*
+	 * Decrement usage count which was incremented
+	 * upon cable connect or cable disconnect in suspended state.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	pr_debug("%s(): disconnect work completed.\n", __func__);
+}
+
+/**
+ * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
+ * @gp: USB gadget IPA Port
+ * @func: IPA function type whose data channel needs to be disabled
+ *
+ * It is being called from atomic context from gadget driver when particular
+ * function is being disable due to USB cable disconnect or USB composition
+ * switch is being trigger. This API performs restoring USB endpoint operation
+ * and disable USB endpoint used for accelerated path.
+ */
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		gadget = port->port_usb->cdev->gadget;
+		port->port_usb->ipa_consumer_ep = -1;
+		port->port_usb->ipa_producer_ep = -1;
+
+		if (port->port_usb->in) {
+			/*
+			 * Disable endpoints.
+			 * Unlocking is needed since disabling the eps might
+			 * stop active transfers and therefore the request
+			 * complete function will be called, where we try
+			 * to obtain the spinlock as well.
+			 */
+			msm_ep_unconfig(port->port_usb->in);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->in);
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->tx_req) {
+				usb_ep_free_request(port->port_usb->in,
+						port->tx_req);
+				port->tx_req = NULL;
+			}
+			port->port_usb->in->endless = false;
+		}
+
+		if (port->port_usb->out) {
+			msm_ep_unconfig(port->port_usb->out);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			usb_ep_disable(port->port_usb->out);
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->rx_req) {
+				usb_ep_free_request(port->port_usb->out,
+						port->rx_req);
+				port->rx_req = NULL;
+			}
+			port->port_usb->out->endless = false;
+		}
+
+		port->port_usb = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	/* BAM/IPA teardown needs to sleep; defer it to the workqueue */
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
+
+/**
+ * configure_fifo() - Configure USB BAM Pipe's data FIFO
+ * @bam_type: USB BAM controller the pipe belongs to
+ * @idx: USB BAM Pipe index
+ * @ep: USB endpoint
+ *
+ * This function configures USB BAM data fifo using fetched pipe configuraion
+ * using provided index value. This function needs to used before starting
+ * endless transfer.
+ */
+static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep)
+{
+	struct sps_mem_buffer data_fifo = {0};
+	u32 usb_bam_pipe_idx;
+
+	get_bam2bam_connection_info(bam_type, idx,
+				&usb_bam_pipe_idx,
+				NULL, &data_fifo, NULL);
+	msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size,
+			usb_bam_pipe_idx);
+}
+
+/**
+ * ipa_data_connect_work() - Perform USB IPA BAM connect
+ * @w: connect work
+ *
+ * It is being schedule from ipa_data_connect() API when particular function
+ * which is using USB IPA accelerated path. This API performs allocating request
+ * for USB endpoint (tx/rx) for endless purpose, configure USB endpoint to be
+ * used in accelerated path, connect of USB BAM pipe, IPA BAM pipe and also
+ * initiate USB IPA BAM pipe handshake for connect sequence.
+ */
+/*
+ * ipa_data_connect_work() - workqueue handler that establishes the
+ * USB BAM <-> IPA data path (FIFO allocation, endpoint config, BAM/IPA
+ * pipe connect, and per-function notification to RNDIS/RmNet/DPL).
+ *
+ * Runs from ipa_data_wq with a usb_gadget autopm reference held by the
+ * code that queued it; the early-exit checks and the goto-based error
+ * unwind below release that reference via usb_gadget_autopm_put_async().
+ * NOTE(review): the RNDIS returns after rndis_ipa_pipe_connect_notify()
+ * and the "waiting for network notify" return keep the reference --
+ * presumably dropped later in disconnect/suspend work; confirm.
+ */
+static void ipa_data_connect_work(struct work_struct *w)
+{
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								connect_w);
+	struct gadget_ipa_port	*gport;
+	struct usb_gadget	*gadget = NULL;
+	struct teth_bridge_connect_params connect_params;
+	struct teth_bridge_init_params teth_bridge_params;
+	u32			sps_params;
+	int			ret;
+	unsigned long		flags;
+	bool			is_ipa_disconnected = true;
+
+	pr_debug("%s: Connect workqueue started\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s(): port_usb is NULL.\n", __func__);
+		return;
+	}
+
+	gport = port->port_usb;
+	if (gport && gport->cdev)
+		gadget = gport->cdev->gadget;
+
+	/* NOTE(review): message says "gport" but it is the gadget that is NULL */
+	if (!gadget) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		pr_err("%s: gport is NULL.\n", __func__);
+		return;
+	}
+
+	/*
+	 * check if connect_w got called two times during RNDIS resume as
+	 * explicit flow control is called to start data transfers after
+	 * ipa_data_connect()
+	 */
+	if (port->is_connected) {
+		pr_debug("IPA connect is already done & Transfers started\n");
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_gadget_autopm_put_async(port->gadget);
+		return;
+	}
+
+	gport->ipa_consumer_ep = -1;
+	gport->ipa_producer_ep = -1;
+
+	port->is_connected = true;
+
+	/* update IPA Parameteres here. */
+	port->ipa_params.usb_connection_speed = gadget->speed;
+	port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+	port->ipa_params.skip_ep_cfg = true;
+	port->ipa_params.keep_ipa_awake = true;
+	port->ipa_params.cons_clnt_hdl = -1;
+	port->ipa_params.prod_clnt_hdl = -1;
+
+	/*
+	 * FIFO allocation may sleep, so the lock is dropped around it and
+	 * port_usb/rx_req are re-validated after re-acquiring it.
+	 */
+	if (gport->out) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->rx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
+				__func__);
+			goto out;
+		}
+
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+				| MSM_PRODUCER | port->src_pipe_idx;
+		port->rx_req->length = 32*1024;
+		port->rx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->src_connection_idx,
+				port->port_usb->out);
+		ret = msm_ep_config(gport->out);
+		if (ret) {
+			pr_err("msm_ep_config() failed for OUT EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto out;
+		}
+	}
+
+	if (gport->in) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		usb_bam_alloc_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb || port->tx_req == NULL) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
+				__func__);
+			goto unconfig_msm_ep_out;
+		}
+		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+						port->dst_pipe_idx;
+		port->tx_req->length = 32*1024;
+		port->tx_req->udc_priv = sps_params;
+		configure_fifo(port->usb_bam_type,
+				port->dst_connection_idx, gport->in);
+		ret = msm_ep_config(gport->in);
+		if (ret) {
+			pr_err("msm_ep_config() failed for IN EP\n");
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_out;
+		}
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		teth_bridge_params.client = port->ipa_params.src_client;
+		ret = teth_bridge_init(&teth_bridge_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_init() failed\n", __func__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto unconfig_msm_ep_in;
+		}
+	}
+
+	/*
+	 * Perform below operations for Tx from Device (OUT transfer)
+	 * 1. Connect with pipe of USB BAM with IPA BAM pipe
+	 * 2. Update USB Endpoint related information using SPS Param.
+	 * 3. Configure USB Endpoint/DBM for the same.
+	 * 4. Override USB ep queue functionality for endless transfer.
+	 */
+	if (gport->out) {
+		pr_debug("configure bam ipa connect for USB OUT\n");
+		port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
+	}
+
+	if (gport->in) {
+		pr_debug("configure bam ipa connect for USB IN\n");
+		port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+
+		if (port->func_type == USB_IPA_FUNC_RNDIS) {
+			port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
+			port->ipa_params.priv = rndis_qc_get_ipa_priv();
+			port->ipa_params.skip_ep_cfg =
+				rndis_qc_get_skip_ep_config();
+		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
+			port->ipa_params.notify =
+				teth_bridge_params.usb_notify_cb;
+			port->ipa_params.priv =
+				teth_bridge_params.private_data;
+			port->ipa_params.reset_pipe_after_lpm =
+				msm_dwc3_reset_ep_after_lpm(gadget);
+			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+			port->ipa_params.skip_ep_cfg =
+				teth_bridge_params.skip_ep_cfg;
+		}
+
+		if (port->func_type == USB_IPA_FUNC_DPL)
+			port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_bam_connect_ipa(port->usb_bam_type,
+						&port->ipa_params);
+		if (ret) {
+			pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
+			goto disconnect_usb_bam_ipa_out;
+		}
+		spin_lock_irqsave(&port->port_lock, flags);
+		is_ipa_disconnected = false;
+		/* check if USB cable is disconnected or not */
+		if (!port->port_usb) {
+			pr_debug("%s:%d: cable is disconnected.\n",
+						__func__, __LINE__);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			goto disconnect_usb_bam_ipa_out;
+		}
+
+		gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
+	}
+
+	/* Hand the connected pipes over to the owning function driver. */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		rndis_data->prod_clnt_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		rndis_data->cons_clnt_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		rndis_data->priv = port->ipa_params.priv;
+
+		pr_debug("ul_max_transfer_size:%d\n",
+				rndis_data->ul_max_transfer_size);
+		pr_debug("ul_max_packets_number:%d\n",
+				rndis_data->ul_max_packets_number);
+		pr_debug("dl_max_transfer_size:%d\n",
+				rndis_data->dl_max_transfer_size);
+
+		ret = rndis_ipa_pipe_connect_notify(
+				rndis_data->cons_clnt_hdl,
+				rndis_data->prod_clnt_hdl,
+				rndis_data->ul_max_transfer_size,
+				rndis_data->ul_max_packets_number,
+				rndis_data->dl_max_transfer_size,
+				rndis_data->priv);
+		if (ret) {
+			pr_err("%s: failed to connect IPA: err:%d\n",
+				__func__, ret);
+			return;
+		}
+		atomic_set(&port->pipe_connect_notified, 1);
+	} else if (port->func_type == USB_IPA_FUNC_RMNET ||
+			port->func_type == USB_IPA_FUNC_DPL) {
+		/* For RmNet and DPL need to update_ipa_pipes to qti */
+		enum qti_port_type qti_port_type = port->func_type ==
+			USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
+		gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
+			gport->ipa_producer_ep, gport->ipa_consumer_ep);
+	}
+
+	if (port->func_type == USB_IPA_FUNC_RMNET) {
+		connect_params.ipa_usb_pipe_hdl =
+			port->ipa_params.prod_clnt_hdl;
+		connect_params.usb_ipa_pipe_hdl =
+			port->ipa_params.cons_clnt_hdl;
+		connect_params.tethering_mode =
+			TETH_TETHERING_MODE_RMNET;
+		connect_params.client_type =
+			port->ipa_params.src_client;
+		ret = teth_bridge_connect(&connect_params);
+		if (ret) {
+			pr_err("%s:teth_bridge_connect() failed\n", __func__);
+			goto disconnect_usb_bam_ipa_out;
+		}
+	}
+
+	pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
+				gport->ipa_producer_ep,
+				gport->ipa_consumer_ep);
+
+	pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
+			port->src_connection_idx, port->dst_connection_idx);
+
+	/* Don't queue the transfers yet, only after network stack is up */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		pr_debug("%s: Not starting now, waiting for network notify",
+			__func__);
+		return;
+	}
+
+	if (gport->out)
+		ipa_data_start_endless_xfer(port, false);
+	if (gport->in)
+		ipa_data_start_endless_xfer(port, true);
+
+	pr_debug("Connect workqueue done (port %pK)", port);
+	return;
+
+/* Error unwind: tear down in reverse order of the setup above. */
+disconnect_usb_bam_ipa_out:
+	if (!is_ipa_disconnected) {
+		usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
+		is_ipa_disconnected = true;
+	}
+	if (port->func_type == USB_IPA_FUNC_RMNET)
+		teth_bridge_disconnect(port->ipa_params.src_client);
+unconfig_msm_ep_in:
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->in)
+		msm_ep_unconfig(port->port_usb->in);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+unconfig_msm_ep_out:
+	if (gport->in)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->dst_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb && gport->out)
+		msm_ep_unconfig(port->port_usb->out);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+	if (gport->out)
+		usb_bam_free_fifos(port->usb_bam_type,
+						port->src_connection_idx);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->is_connected = false;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	/* drop the autopm reference taken when this work was queued */
+	usb_gadget_autopm_put_async(port->gadget);
+}
+
+/**
+ * ipa_data_connect() - Prepare IPA params and enable USB endpoints
+ * @gp: USB IPA gadget port
+ * @func: IPA function type, also used as the port index
+ * @src_connection_idx: USB BAM pipe index used as producer
+ * @dst_connection_idx: USB BAM pipe index used as consumer
+ *
+ * It is being called from accelerated function driver (from set_alt()) to
+ * initiate USB BAM IPA connection. This API is enabling accelerated endpoints
+ * and schedule connect_work() which establishes USB IPA BAM communication.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		u8 src_connection_idx, u8 dst_connection_idx)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n",
+			gp, func, src_connection_idx, dst_connection_idx);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid portno#%d\n", func);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	if (!gp) {
+		pr_err("gadget port is null\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	port = ipa_data_ports[func];
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gp;
+	port->gadget = gp->cdev->gadget;
+
+	/* Pre-allocate the endless requests used by connect_work(). */
+	if (gp->out) {
+		port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC);
+		if (!port->rx_req) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			pr_err("%s: failed to allocate rx_req\n", __func__);
+			/* was falling through with ret == 0 (success) */
+			ret = -ENOMEM;
+			goto err;
+		}
+		port->rx_req->context = port;
+		port->rx_req->complete = ipa_data_endless_complete;
+		port->rx_req->length = 0;
+		port->rx_req->no_interrupt = 1;
+	}
+
+	if (gp->in) {
+		port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC);
+		if (!port->tx_req) {
+			pr_err("%s: failed to allocate tx_req\n", __func__);
+			/* was falling through with ret == 0 (success) */
+			ret = -ENOMEM;
+			goto free_rx_req;
+		}
+		port->tx_req->context = port;
+		port->tx_req->complete = ipa_data_endless_complete;
+		port->tx_req->length = 0;
+		port->tx_req->no_interrupt = 1;
+	}
+	port->src_connection_idx = src_connection_idx;
+	port->dst_connection_idx = dst_connection_idx;
+	port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name);
+
+	port->ipa_params.src_pipe = &(port->src_pipe_idx);
+	port->ipa_params.dst_pipe = &(port->dst_pipe_idx);
+	port->ipa_params.src_idx = src_connection_idx;
+	port->ipa_params.dst_idx = dst_connection_idx;
+
+	/*
+	 * Disable Xfer complete and Xfer not ready interrupts by
+	 * marking endless flag which is used in UDC driver to enable
+	 * these interrupts. with this set, these interrupts for selected
+	 * endpoints won't be enabled.
+	 */
+	if (port->port_usb->in) {
+		port->port_usb->in->endless = true;
+		ret = usb_ep_enable(port->port_usb->in);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:IN ep:%pK",
+						port->port_usb->in);
+			usb_ep_free_request(port->port_usb->in, port->tx_req);
+			port->tx_req = NULL;
+			port->port_usb->in->endless = false;
+			goto err_usb_in;
+		}
+	}
+
+	if (port->port_usb->out) {
+		port->port_usb->out->endless = true;
+		ret = usb_ep_enable(port->port_usb->out);
+		if (ret) {
+			pr_err("usb_ep_enable failed eptype:OUT ep:%pK",
+						port->port_usb->out);
+			usb_ep_free_request(port->port_usb->out, port->rx_req);
+			port->rx_req = NULL;
+			port->port_usb->out->endless = false;
+			goto err_usb_out;
+		}
+	}
+
+	/* Wait for host to enable flow_control */
+	if (port->func_type == USB_IPA_FUNC_RNDIS) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = 0;
+		return ret;
+	}
+
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work (due to cable disconnect)
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+
+	queue_work(ipa_data_wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return ret;
+
+err_usb_out:
+	if (port->port_usb->in) {
+		usb_ep_disable(port->port_usb->in);
+		port->port_usb->in->endless = false;
+	}
+err_usb_in:
+	if (gp->in && port->tx_req) {
+		usb_ep_free_request(gp->in, port->tx_req);
+		port->tx_req = NULL;
+	}
+free_rx_req:
+	if (gp->out && port->rx_req) {
+		usb_ep_free_request(gp->out, port->rx_req);
+		port->rx_req = NULL;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+err:
+	pr_debug("%s(): failed with error:%d\n", __func__, ret);
+	return ret;
+}
+
+/**
+ * ipa_data_start() - Restart USB endless transfer
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * Called on USB bus resume. The consumer (RX) side simply restarts the
+ * endless transfer; the producer (TX) side may first need its FIFO
+ * reconfigured when the controller requires an endpoint reset after LPM.
+ */
+static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *ch = param;
+	struct usb_gadget *gadget;
+
+	if (!ch || !ch->port_usb || !ch->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is  NULL\n", __func__);
+		return;
+	}
+
+	gadget = ch->port_usb->cdev->gadget;
+
+	if (dir != USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): start endless TX\n", __func__);
+		if (msm_dwc3_reset_ep_after_lpm(gadget))
+			configure_fifo(ch->usb_bam_type,
+					ch->dst_connection_idx,
+					ch->port_usb->in);
+		ipa_data_start_endless_xfer(ch, true);
+		return;
+	}
+
+	pr_debug("%s(): start endless RX\n", __func__);
+	ipa_data_start_endless_xfer(ch, false);
+}
+
+/**
+ * ipa_data_stop() - Stop endless Tx/Rx transfers
+ * @param: IPA data channel information
+ * @dir: USB BAM pipe direction
+ *
+ * It is being used to stop endless Tx/Rx transfers. It is being used
+ * for USB bus suspend functionality.
+ */
+static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir)
+{
+	struct ipa_data_ch_info *port = param;
+
+	if (!port || !port->port_usb || !port->port_usb->cdev->gadget) {
+		pr_err("%s:port,cdev or gadget is  NULL\n", __func__);
+		return;
+	}
+
+	/* Dropped the set-but-unused local 'gadget' (compiler warning). */
+	if (dir == USB_TO_PEER_PERIPHERAL) {
+		pr_debug("%s(): stop endless RX transfer\n", __func__);
+		ipa_data_stop_endless_xfer(port, false);
+	} else {
+		pr_debug("%s(): stop endless TX transfer\n", __func__);
+		ipa_data_stop_endless_xfer(port, true);
+	}
+}
+
+/* Drain all pending work items on the shared IPA data workqueue. */
+void ipa_data_flush_workqueue(void)
+{
+	pr_debug("%s(): Flushing workqueue\n", __func__);
+	flush_workqueue(ipa_data_wq);
+}
+
+/**
+ * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type, used as the port index
+ * @remote_wakeup_enabled: true when the host enabled remote wakeup
+ *
+ * It is being used to initiate USB BAM IPA suspend functionality
+ * for USB bus suspend functionality.
+ *
+ * With remote wakeup disabled the connection is torn down entirely
+ * (after backing up the endpoint descriptors so resume can restore
+ * them); otherwise the suspend work item performs the IPA handshake.
+ */
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+	pr_debug("%s: suspended port %d\n", __func__, func);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s(): Port is NULL.\n", __func__);
+		return;
+	}
+
+	/* suspend with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/*
+		 * When remote wakeup is disabled, IPA BAM is disconnected
+		 * because it cannot send new data until the USB bus is resumed.
+		 * Endpoint descriptors info is saved before it gets reset by
+		 * the BAM disconnect API. This lets us restore this info when
+		 * the USB bus is resumed.
+		 */
+		if (gp->in) {
+			gp->in_ep_desc_backup = gp->in->desc;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+		}
+		if (gp->out) {
+			gp->out_ep_desc_backup = gp->out->desc;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+		}
+		ipa_data_disconnect(gp, func);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->suspend_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/*
+ * Suspend work item (queued via port->suspend_w): registers the BAM
+ * wake and start/stop callbacks and runs the IPA suspend handshake,
+ * then drops the autopm reference taken on cable connect.
+ */
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+	/*
+	 * This handler is queued through port->suspend_w, so the
+	 * container_of() must use suspend_w (using connect_w computed a
+	 * wrong port pointer).
+	 */
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								suspend_w);
+	unsigned long flags;
+	int ret;
+
+	pr_debug("%s: suspend started\n", __func__);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* In case of RNDIS, host enables flow_control invoking connect_w. If it
+	 * is delayed then we may end up having suspend_w run before connect_w.
+	 * In this scenario, connect_w may or may not at all start if cable gets
+	 * disconnected or if host changes configuration e.g. RNDIS --> MBIM
+	 * For these cases don't do runtime_put as there was no _get yet, and
+	 * detect this condition on disconnect to not do extra pm_runtme_get
+	 * for SUSPEND --> DISCONNECT scenario.
+	 */
+	if (!port->is_connected) {
+		pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+			port->dst_connection_idx, NULL, port);
+	if (ret) {
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+				__func__);
+		/* was returning with port_lock held and IRQs disabled */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	usb_bam_register_start_stop_cbs(port->usb_bam_type,
+			port->dst_connection_idx, ipa_data_start,
+			ipa_data_stop, port);
+	/*
+	 * release lock here because bam_data_start() or
+	 * bam_data_stop() called from usb_bam_suspend()
+	 * re-acquires port lock.
+	 */
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	usb_bam_suspend(port->usb_bam_type, &port->ipa_params);
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Decrement usage count after IPA handshake is done
+	 * to allow gadget parent to go to lpm. This counter was
+	 * incremented upon cable connect.
+	 */
+	usb_gadget_autopm_put_async(port->gadget);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_resume() - Initiate USB resume functionality
+ * @gp: Gadget IPA port
+ * @func: IPA function type, used as the port index
+ * @remote_wakeup_enabled: true when the host enabled remote wakeup
+ *
+ * It is being used to initiate USB resume functionality
+ * for USB bus resume case.
+ *
+ * With remote wakeup disabled the whole connection is rebuilt (the
+ * suspend path tore it down); otherwise the resume work item is queued.
+ */
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			bool remote_wakeup_enabled)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+	struct usb_gadget *gadget = NULL;
+	u8 src_connection_idx = 0;
+	u8 dst_connection_idx = 0;
+	enum usb_ctrl usb_bam_type;
+
+	pr_debug("dev:%pK port number:%d\n", gp, func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("invalid ipa portno#%d\n", func);
+		return;
+	}
+
+	if (!gp) {
+		pr_err("data port is null\n");
+		return;
+	}
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("port %u is NULL", func);
+		return;
+	}
+
+	gadget = gp->cdev->gadget;
+	/* resume with remote wakeup disabled */
+	if (!remote_wakeup_enabled) {
+		/* DPL uses BAM pipe 1; all other functions use pipe 0 */
+		int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;
+
+		usb_bam_type = usb_bam_get_bam_type(gadget->name);
+		/* Restore endpoint descriptors info. */
+		if (gp->in) {
+			gp->in->desc = gp->in_ep_desc_backup;
+			pr_debug("in_ep_desc_backup = %pK\n",
+				gp->in_ep_desc_backup);
+			dst_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		if (gp->out) {
+			gp->out->desc = gp->out_ep_desc_backup;
+			pr_debug("out_ep_desc_backup = %pK\n",
+				gp->out_ep_desc_backup);
+			src_connection_idx = usb_bam_get_connection_idx(
+				usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
+				USB_BAM_DEVICE, bam_pipe_num);
+		}
+		ipa_data_connect(gp, func,
+				src_connection_idx, dst_connection_idx);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/*
+	 * Increment usage count here to disallow gadget
+	 * parent suspend. This counter will decrement
+	 * after IPA handshake is done in disconnect work
+	 * (due to cable disconnect) or in bam_data_disconnect
+	 * in suspended state.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->resume_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Resume work item (queued via port->resume_w): re-registers the BAM
+ * wake callback and, when the controller needs an endpoint reset after
+ * LPM, reconfigures the FIFOs, resets the DBM endpoint and resumes the
+ * BAM/IPA pipes.
+ */
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+	/*
+	 * This handler is queued through port->resume_w, so the
+	 * container_of() must use resume_w (using connect_w computed a
+	 * wrong port pointer).
+	 */
+	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
+								resume_w);
+	struct usb_gadget *gadget;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb || !port->port_usb->cdev) {
+		pr_err("port->port_usb or cdev is NULL");
+		goto exit;
+	}
+
+	if (!port->port_usb->cdev->gadget) {
+		pr_err("port->port_usb->cdev->gadget is NULL");
+		goto exit;
+	}
+
+	pr_debug("%s: resume started\n", __func__);
+	/* gadget was verified non-NULL above; no second check needed */
+	gadget = port->port_usb->cdev->gadget;
+
+	ret = usb_bam_register_wake_cb(port->usb_bam_type,
+				port->dst_connection_idx, NULL, NULL);
+	if (ret) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s(): Failed to register BAM wake callback.\n",
+								__func__);
+		return;
+	}
+
+	if (msm_dwc3_reset_ep_after_lpm(gadget)) {
+		configure_fifo(port->usb_bam_type, port->src_connection_idx,
+				port->port_usb->out);
+		configure_fifo(port->usb_bam_type, port->dst_connection_idx,
+				port->port_usb->in);
+		/* DBM ep reset may sleep; drop the lock around it */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		msm_dwc3_reset_dbm_ep(port->port_usb->in);
+		spin_lock_irqsave(&port->port_lock, flags);
+		usb_bam_resume(port->usb_bam_type, &port->ipa_params);
+	}
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/**
+ * ipa_data_port_alloc() - Allocate IPA USB Port structure
+ * @func: function type, used as the port index
+ *
+ * Allocates (at most once) the per-function IPA data port used for the
+ * USB IPA accelerated data path.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+static int ipa_data_port_alloc(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *ch;
+
+	/* Idempotent: reuse a previously allocated port. */
+	if (ipa_data_ports[func]) {
+		pr_debug("port %d already allocated.\n", func);
+		return 0;
+	}
+
+	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+	if (!ch)
+		return -ENOMEM;
+
+	ipa_data_ports[func] = ch;
+
+	pr_debug("port:%pK with portno:%d allocated\n", ch, func);
+	return 0;
+}
+
+/**
+ * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode
+ * @func: function type, used as the port index (set by ipa_data_setup())
+ *
+ * It is being used by USB function driver to select which BAM2BAM IPA
+ * port particular USB function wants to use.
+ */
+void ipa_data_port_select(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port = NULL;
+
+	pr_debug("portno:%d\n", func);
+
+	port = ipa_data_ports[func];
+	port->port_num  = func;
+	port->is_connected = false;
+
+	spin_lock_init(&port->port_lock);
+
+	/* Don't re-init a work item that is still queued/pending. */
+	if (!work_pending(&port->connect_w))
+		INIT_WORK(&port->connect_w, ipa_data_connect_work);
+
+	if (!work_pending(&port->disconnect_w))
+		INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work);
+
+	INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+	INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
+
+	port->ipa_params.src_client = IPA_CLIENT_USB_PROD;
+	port->ipa_params.dst_client = IPA_CLIENT_USB_CONS;
+	port->func_type = func;
+}
+
+/**
+ * ipa_data_free() - Free a BAM2BAM IPA port and shared resources
+ * @func: function type whose port is being freed
+ *
+ * Frees the per-function port, the RNDIS bookkeeping (when freeing the
+ * RNDIS port) and destroys the shared workqueue.
+ */
+void ipa_data_free(enum ipa_func_type func)
+{
+	pr_debug("freeing %d IPA BAM port", func);
+
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+	if (func == USB_IPA_FUNC_RNDIS) {
+		kfree(rndis_data);
+		/* avoid dangling pointer / double free on a later call */
+		rndis_data = NULL;
+	}
+	if (ipa_data_wq) {
+		destroy_workqueue(ipa_data_wq);
+		ipa_data_wq = NULL;
+	}
+}
+
+/**
+ * ipa_data_setup() - setup BAM2BAM IPA port
+ * @func: function type, used as the port index
+ *
+ * Each USB function who wants to use BAM2BAM IPA port would
+ * be counting number of IPA port to use and initialize those
+ * ports at time of bind_config() in android gadget driver.
+ *
+ * Return: 0 in case of success, otherwise errno.
+ */
+int ipa_data_setup(enum ipa_func_type func)
+{
+	int ret;
+
+	pr_debug("requested %d IPA BAM port", func);
+
+	if (func >= USB_IPA_NUM_FUNCS) {
+		pr_err("Invalid num of ports count:%d\n", func);
+		return -EINVAL;
+	}
+
+	ret = ipa_data_port_alloc(func);
+	if (ret) {
+		pr_err("Failed to alloc port:%d\n", func);
+		return ret;
+	}
+
+	if (func == USB_IPA_FUNC_RNDIS) {
+		rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL);
+		if (!rndis_data) {
+			pr_err("%s: fail allocate and initialize new instance\n",
+				__func__);
+			/* was returning 0 (ret from port_alloc) on OOM */
+			ret = -ENOMEM;
+			goto free_ipa_ports;
+		}
+	}
+	if (ipa_data_wq) {
+		pr_debug("ipa_data_wq is already setup.");
+		return 0;
+	}
+
+	ipa_data_wq = alloc_workqueue("k_usb_ipa_data",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!ipa_data_wq) {
+		pr_err("Failed to create workqueue\n");
+		ret = -ENOMEM;
+		goto free_rndis_data;
+	}
+
+	return 0;
+
+free_rndis_data:
+	if (func == USB_IPA_FUNC_RNDIS) {
+		kfree(rndis_data);
+		/* don't leave a dangling pointer behind */
+		rndis_data = NULL;
+	}
+free_ipa_ports:
+	kfree(ipa_data_ports[func]);
+	ipa_data_ports[func] = NULL;
+
+	return ret;
+}
+
+/*
+ * Record the RNDIS uplink max aggregated transfer size.
+ * NOTE(review): assumes rndis_data was allocated by
+ * ipa_data_setup(USB_IPA_FUNC_RNDIS) -- confirm call ordering.
+ */
+void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size)
+{
+	if (!max_transfer_size) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+	rndis_data->ul_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/*
+ * Record the RNDIS downlink max aggregated transfer size.
+ * NOTE(review): assumes rndis_data was allocated by
+ * ipa_data_setup(USB_IPA_FUNC_RNDIS) -- confirm call ordering.
+ */
+void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size)
+{
+
+	if (!max_transfer_size) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+	rndis_data->dl_max_transfer_size = max_transfer_size;
+	pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size);
+}
+
+/*
+ * Record the RNDIS uplink max packet count and derive whether uplink
+ * aggregation should be enabled (more than one packet per transfer).
+ */
+void ipa_data_set_ul_max_pkt_num(u8 max_packets_number)
+{
+	if (!max_packets_number) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return;
+	}
+
+	rndis_data->ul_max_packets_number = max_packets_number;
+	rndis_data->ul_aggregation_enable = (max_packets_number > 1);
+
+	pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n",
+				__func__, rndis_data->ul_aggregation_enable,
+				max_packets_number);
+}
+
+/*
+ * Kick off the RNDIS IPA connection once the host enables flow control:
+ * takes an autopm reference and queues connect_w. A no-op when the
+ * pipe-connect notification was already delivered.
+ */
+void ipa_data_start_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	if (atomic_read(&port->pipe_connect_notified)) {
+		pr_debug("%s: Transfers already started?\n", __func__);
+		return;
+	}
+	/*
+	 * Increment usage count upon cable connect. Decrement after IPA
+	 * handshake is done in disconnect work due to cable disconnect
+	 * or in suspend work.
+	 */
+	usb_gadget_autopm_get_noresume(port->gadget);
+	queue_work(ipa_data_wq, &port->connect_w);
+}
+
+/*
+ * Stop the RNDIS IPA data path: trigger the RNDIS IPA reset, halt both
+ * endless transfers, unconfigure the endpoints (if the cable is still
+ * attached) and queue disconnect_w for the remaining teardown.
+ */
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
+{
+	struct ipa_data_ch_info *port;
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	port = ipa_data_ports[func];
+	if (!port) {
+		pr_err("%s: port is NULL", __func__);
+		return;
+	}
+
+	/* Nothing to stop if the pipes were never handed to RNDIS. */
+	if (!atomic_read(&port->pipe_connect_notified))
+		return;
+
+	rndis_ipa_reset_trigger();
+	ipa_data_stop_endless_xfer(port, true);
+	ipa_data_stop_endless_xfer(port, false);
+	spin_lock_irqsave(&port->port_lock, flags);
+	/* check if USB cable is disconnected or not */
+	if (port->port_usb) {
+		msm_ep_unconfig(port->port_usb->in);
+		msm_ep_unconfig(port->port_usb->out);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	queue_work(ipa_data_wq, &port->disconnect_w);
+}
diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h
new file mode 100644
index 0000000..70d4293
--- /dev/null
+++ b/drivers/usb/gadget/function/u_data_ipa.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_DATA_IPA_H
+#define __U_DATA_IPA_H
+
+#include <linux/usb/composite.h>
+#include <linux/rndis_ipa.h>
+#include <linux/usb/gadget.h>
+#include <linux/cdev.h>
+#include <linux/ipa_usb.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
+/* USB functions that can use the BAM2BAM IPA data path; also used as
+ * the index into the per-function port array.
+ */
+enum ipa_func_type {
+	USB_IPA_FUNC_ECM,
+	USB_IPA_FUNC_MBIM,
+	USB_IPA_FUNC_RMNET,
+	USB_IPA_FUNC_RNDIS,
+	USB_IPA_FUNC_DPL,
+	USB_IPA_NUM_FUNCS,
+};
+
+/* Max Number of IPA data ports supported */
+#define IPA_N_PORTS USB_IPA_NUM_FUNCS
+
+/* Per-function view of the accelerated data path shared between the
+ * function driver and the IPA data core.
+ */
+struct gadget_ipa_port {
+	struct usb_composite_dev	*cdev;
+	struct usb_function		*func;
+	int				rx_buffer_size;
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+	/* IPA endpoint indices, -1 until the pipes are connected */
+	int				ipa_consumer_ep;
+	int				ipa_producer_ep;
+	/* descriptor backups used across suspend/resume without remote wakeup */
+	const struct usb_endpoint_descriptor	*in_ep_desc_backup;
+	const struct usb_endpoint_descriptor	*out_ep_desc_backup;
+
+};
+
+/* Descriptor/string bundle a function passes to the common bind helper. */
+struct ipa_function_bind_info {
+	struct usb_string *string_defs;
+	int data_str_idx;
+	struct usb_interface_descriptor *data_desc;
+	struct usb_endpoint_descriptor *fs_in_desc;
+	struct usb_endpoint_descriptor *fs_out_desc;
+	struct usb_endpoint_descriptor *fs_notify_desc;
+	struct usb_endpoint_descriptor *hs_in_desc;
+	struct usb_endpoint_descriptor *hs_out_desc;
+	struct usb_endpoint_descriptor *hs_notify_desc;
+	struct usb_endpoint_descriptor *ss_in_desc;
+	struct usb_endpoint_descriptor *ss_out_desc;
+	struct usb_endpoint_descriptor *ss_notify_desc;
+
+	struct usb_descriptor_header **fs_desc_hdr;
+	struct usb_descriptor_header **hs_desc_hdr;
+	struct usb_descriptor_header **ss_desc_hdr;
+};
+
+/* for configfs support */
+#define MAX_INST_NAME_LEN      40
+
+/* configfs instance state for the RNDIS (qc) function */
+struct f_rndis_qc_opts {
+	struct usb_function_instance	func_inst;
+	struct f_rndis_qc		*rndis;
+	u32				vendor_id;
+	const char			*manufacturer;
+	struct net_device		*net;
+	int				refcnt;
+};
+
+/* configfs instance state for the RmNet function */
+struct f_rmnet_opts {
+	struct usb_function_instance func_inst;
+	struct f_rmnet *dev;
+	int refcnt;
+};
+
+void ipa_data_port_select(enum ipa_func_type func);
+void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func);
+int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func,
+			u8 src_connection_idx, u8 dst_connection_idx);
+int ipa_data_setup(enum ipa_func_type func);
+void ipa_data_free(enum ipa_func_type func);
+
+void ipa_data_flush_workqueue(void);
+void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
+		bool remote_wakeup_enabled);
+
+void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size);
+
+void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size);
+
+void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number);
+
+void ipa_data_start_rx_tx(enum ipa_func_type func);
+
+void ipa_data_start_rndis_ipa(enum ipa_func_type func);
+
+void ipa_data_stop_rndis_ipa(enum ipa_func_type func);
+
+void *rndis_qc_get_ipa_priv(void);
+void *rndis_qc_get_ipa_rx_cb(void);
+bool rndis_qc_get_skip_ep_config(void);
+void *rndis_qc_get_ipa_tx_cb(void);
+void rndis_ipa_reset_trigger(void);
+#if IS_ENABLED(CONFIG_USB_CONFIGFS_RMNET_BAM)
+void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons);
+#else
+static inline void gqti_ctrl_update_ipa_pipes(void *gr,
+				enum qti_port_type qport,
+				u32 ipa_prod, u32 ipa_cons)
+{
+}
+#endif /* CONFIG_USB_CONFIGFS_RMNET_BAM */
+#endif
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index 06eecd1..b4353ac 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -47,6 +47,8 @@
 	int			idx;
 	struct usb_qdss_bam_connect_info bam_info;
 	struct usb_gadget *gadget;
+	struct device *dev;
+	int ret;
 
 	pr_debug("set_qdss_data_connection\n");
 
@@ -57,6 +59,7 @@
 
 	gadget = qdss->gadget;
 	usb_bam_type = usb_bam_get_bam_type(gadget->name);
+	dev = gadget->dev.parent;
 
 	bam_info = qdss->bam_info;
 	/* There is only one qdss pipe, so the pipe number can be set to 0 */
@@ -68,6 +71,23 @@
 	}
 
 	if (enable) {
+		ret = get_qdss_bam_info(usb_bam_type, idx,
+				&bam_info.qdss_bam_phys,
+				&bam_info.qdss_bam_size);
+		if (ret) {
+			pr_err("%s(): failed to get qdss bam info err(%d)\n",
+								__func__, ret);
+			return ret;
+		}
+
+		bam_info.qdss_bam_iova = dma_map_resource(dev->parent,
+				bam_info.qdss_bam_phys, bam_info.qdss_bam_size,
+				DMA_BIDIRECTIONAL, 0);
+		if (!bam_info.qdss_bam_iova) {
+			pr_err("dma_map_resource failed\n");
+			return -ENOMEM;
+		}
+
 		usb_bam_alloc_fifos(usb_bam_type, idx);
 		bam_info.data_fifo =
 			kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
@@ -76,25 +96,34 @@
 			usb_bam_free_fifos(usb_bam_type, idx);
 			return -ENOMEM;
 		}
+
+		pr_debug("%s(): qdss_bam: iova:%lx p_addr:%lx size:%x\n",
+				__func__, bam_info.qdss_bam_iova,
+				(unsigned long)bam_info.qdss_bam_phys,
+				bam_info.qdss_bam_size);
+
 		get_bam2bam_connection_info(usb_bam_type, idx,
 				&bam_info.usb_bam_pipe_idx,
 				NULL, bam_info.data_fifo, NULL);
 
 		alloc_sps_req(qdss->port.data);
 		msm_data_fifo_config(qdss->port.data,
-					bam_info.data_fifo->phys_base,
-					bam_info.data_fifo->size,
-					bam_info.usb_bam_pipe_idx);
+			bam_info.data_fifo->iova,
+			bam_info.data_fifo->size,
+			bam_info.usb_bam_pipe_idx);
 		init_data(qdss->port.data);
 
 		res = usb_bam_connect(usb_bam_type, idx,
-					&(bam_info.usb_bam_pipe_idx));
+					&(bam_info.usb_bam_pipe_idx),
+					bam_info.qdss_bam_iova);
 	} else {
-		kfree(bam_info.data_fifo);
 		res = usb_bam_disconnect_pipe(usb_bam_type, idx);
 		if (res)
 			pr_err("usb_bam_disconnection error\n");
+		dma_unmap_resource(dev->parent, bam_info.qdss_bam_iova,
+				bam_info.qdss_bam_size, DMA_BIDIRECTIONAL, 0);
 		usb_bam_free_fifos(usb_bam_type, idx);
+		kfree(bam_info.data_fifo);
 	}
 
 	return res;
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
new file mode 100644
index 0000000..0126932
--- /dev/null
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "f_qdss.h"
+
+struct rmnet_ctrl_pkt {
+	void	*buf;
+	int	len;
+	struct list_head	list;
+};
+
+struct grmnet {
+	/* to usb host, aka laptop, windows pc etc. Will
+	 * be filled by usb driver of rmnet functionality
+	 */
+	int (*send_cpkt_response)(void *g, void *buf, size_t len);
+
+	/* to modem, and to be filled by driver implementing
+	 * control function
+	 */
+	int (*send_encap_cmd)(enum qti_port_type qport, void *buf, size_t len);
+	void (*notify_modem)(void *g, enum qti_port_type qport, int cbits);
+
+	void (*disconnect)(struct grmnet *g);
+	void (*connect)(struct grmnet *g);
+};
+
+enum ctrl_client {
+	FRMNET_CTRL_CLIENT,
+	GPS_CTRL_CLIENT,
+
+	NR_CTRL_CLIENTS
+};
+
+int gqti_ctrl_connect(void *gr, enum qti_port_type qport, unsigned int intf);
+void gqti_ctrl_disconnect(void *gr, enum qti_port_type qport);
+int gqti_ctrl_init(void);
+void gqti_ctrl_cleanup(void);
+#endif /* __U_RMNET_H */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 588546a..c7596a7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -220,7 +220,15 @@
 	 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
 	 */
 	sysdev = &pdev->dev;
-	if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+	/*
+	 * If sysdev->parent->parent is available and part of IOMMU group
+	 * (indicating possible usage of SMMU enablement), then use
+	 * sysdev->parent->parent as sysdev.
+	 */
+	if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node &&
+		sysdev->parent->parent && sysdev->parent->parent->iommu_group)
+		sysdev = sysdev->parent->parent;
+	else if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
 		sysdev = sysdev->parent;
 #ifdef CONFIG_PCI
 	else if (sysdev->parent && sysdev->parent->parent &&
@@ -316,7 +324,7 @@
 	if (device_property_read_u32(&pdev->dev, "xhci-imod-value", &imod))
 		imod = 0;
 
-	if (device_property_read_u32(sysdev, "usb-core-id", &xhci->core_id))
+	if (device_property_read_u32(&pdev->dev, "usb-core-id", &xhci->core_id))
 		xhci->core_id = -EINVAL;
 
 	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 0091f41..56b2a6d 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2099,7 +2099,6 @@
 		if (pd->current_pr == PR_SINK) {
 			usbpd_set_state(pd, PE_SNK_STARTUP);
 		} else if (pd->current_pr == PR_SRC) {
-			enable_vbus(pd);
 			if (!pd->vconn_enabled &&
 					pd->typec_mode ==
 					POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
@@ -2109,6 +2108,7 @@
 				else
 					pd->vconn_enabled = true;
 			}
+			enable_vbus(pd);
 
 			usbpd_set_state(pd, PE_SRC_STARTUP);
 		}
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index f7ff9e8f..9c33c6e 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -45,6 +45,8 @@
 #define FREEZIO_N			BIT(1)
 #define POWER_DOWN			BIT(0)
 
+#define QUSB2PHY_PORT_TEST_CTRL		0xB8
+
 #define QUSB2PHY_PWR_CTRL1		0x210
 #define PWR_CTRL1_CLAMP_N_EN		BIT(1)
 #define PWR_CTRL1_POWR_DOWN		BIT(0)
@@ -68,10 +70,7 @@
 #define QUSB2PHY_PORT_TUNE2             0x84
 #define QUSB2PHY_PORT_TUNE3             0x88
 #define QUSB2PHY_PORT_TUNE4             0x8C
-
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
+#define QUSB2PHY_PORT_TUNE5             0x90
 
 /* Get TUNE2's high nibble value read from efuse */
 #define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
@@ -98,21 +97,42 @@
 
 #define QUSB2PHY_REFCLK_ENABLE		BIT(0)
 
-unsigned int tune2;
-module_param(tune2, uint, S_IRUGO | S_IWUSR);
+static unsigned int tune1;
+module_param(tune1, uint, 0644);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
+static unsigned int tune2;
+module_param(tune2, uint, 0644);
 MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
 
+static unsigned int tune3;
+module_param(tune3, uint, 0644);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+static unsigned int tune4;
+module_param(tune4, uint, 0644);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+static unsigned int tune5;
+module_param(tune5, uint, 0644);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
 struct qusb_phy {
 	struct usb_phy		phy;
 	void __iomem		*base;
 	void __iomem		*tune2_efuse_reg;
 	void __iomem		*ref_clk_base;
+	void __iomem		*tcsr_clamp_dig_n;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*cfg_ahb_clk;
 	struct reset_control	*phy_reset;
+	struct clk		*iface_clk;
+	struct clk		*core_clk;
 
+	struct regulator	*gdsc;
 	struct regulator	*vdd;
 	struct regulator	*vdda33;
 	struct regulator	*vdda18;
@@ -124,6 +144,7 @@
 	u32			tune2_val;
 	int			tune2_efuse_bit_pos;
 	int			tune2_efuse_num_of_bits;
+	int			tune2_efuse_correction;
 
 	bool			power_enabled;
 	bool			clocks_enabled;
@@ -145,6 +166,8 @@
 	int			phy_pll_reset_seq_len;
 	int			*emu_dcm_reset_seq;
 	int			emu_dcm_reset_seq_len;
+	bool			put_into_high_z_state;
+	struct mutex		phy_lock;
 };
 
 static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -155,14 +178,22 @@
 	if (!qphy->clocks_enabled && on) {
 		clk_prepare_enable(qphy->ref_clk_src);
 		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->iface_clk);
+		clk_prepare_enable(qphy->core_clk);
 		clk_prepare_enable(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = true;
 	}
 
 	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		/*
+		 * FSM dependency between iface_clk and core_clk.
+		 * Hence turn off core_clk before iface_clk.
+		 */
+		clk_disable_unprepare(qphy->core_clk);
+		clk_disable_unprepare(qphy->iface_clk);
 		clk_disable_unprepare(qphy->ref_clk);
 		clk_disable_unprepare(qphy->ref_clk_src);
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = false;
 	}
 
@@ -170,6 +201,32 @@
 						qphy->clocks_enabled);
 }
 
+static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qphy->gdsc))
+		return -EPERM;
+
+	if (on) {
+		dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n");
+		ret = regulator_enable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to enable gdsc\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n");
+		ret = regulator_disable(qphy->gdsc);
+		if (ret) {
+			dev_err(qphy->phy.dev, "unable to disable gdsc\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
 {
 	int min, ret;
@@ -313,6 +370,7 @@
 {
 	u8 num_of_bits;
 	u32 bit_mask = 1;
+	u8 reg_val;
 
 	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
 				qphy->tune2_efuse_num_of_bits,
@@ -326,9 +384,8 @@
 
 	/*
 	 * Read EFUSE register having TUNE2 parameter's high nibble.
-	 * If efuse register shows value as 0x0, then use default value
-	 * as 0xB as high nibble. Otherwise use efuse register based
-	 * value for this purpose.
+	 * If efuse register shows value as 0x0, then use previous value
+	 * as it is. Otherwise use efuse register based value for this purpose.
 	 */
 	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
 	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
@@ -337,12 +394,24 @@
 	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
 				qphy->tune2_efuse_bit_pos, bit_mask);
 
-	if (!qphy->tune2_val)
-		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
+	/* Update higher nibble of TUNE2 value for better rise/fall times */
+	if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+		if (qphy->tune2_efuse_correction > 5 ||
+				qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range : %d\n",
+					qphy->tune2_efuse_correction);
+		else
+			qphy->tune2_val = qphy->tune2_val +
+						qphy->tune2_efuse_correction;
+	}
 
-	/* Get TUNE2 byte value using high and low nibble value */
-	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
-					TUNE2_DEFAULT_LOW_NIBBLE);
+	reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+	if (qphy->tune2_val) {
+		reg_val  &= 0x0f;
+		reg_val |= (qphy->tune2_val << 4);
+	}
+
+	qphy->tune2_val = reg_val;
 }
 
 static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -450,7 +519,7 @@
 	 * and try to read EFUSE value only once i.e. not every USB
 	 * cable connect case.
 	 */
-	if (qphy->tune2_efuse_reg) {
+	if (qphy->tune2_efuse_reg && !tune2) {
 		if (!qphy->tune2_val)
 			qusb_phy_get_tune2_param(qphy);
 
@@ -460,13 +529,29 @@
 				qphy->base + QUSB2PHY_PORT_TUNE2);
 	}
 
-	/* If tune2 modparam set, override tune2 value */
-	if (tune2) {
-		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
-						__func__, tune2);
+	/* If tune modparam set, override tune value */
+
+	pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
+				__func__, tune1, tune2, tune3, tune4, tune5);
+	if (tune1)
+		writel_relaxed(tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
+
+	if (tune2)
 		writel_relaxed(tune2,
 				qphy->base + QUSB2PHY_PORT_TUNE2);
-	}
+
+	if (tune3)
+		writel_relaxed(tune3,
+				qphy->base + QUSB2PHY_PORT_TUNE3);
+
+	if (tune4)
+		writel_relaxed(tune4,
+				qphy->base + QUSB2PHY_PORT_TUNE4);
+
+	if (tune5)
+		writel_relaxed(tune5,
+				qphy->base + QUSB2PHY_PORT_TUNE5);
 
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
@@ -596,27 +681,55 @@
 			writel_relaxed(intr_mask,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
+			if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+				/* enable phy auto-resume */
+				writel_relaxed(0x0C,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+				/* flush the previous write before next write */
+				wmb();
+				writel_relaxed(0x04,
+					qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+			}
+
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Makes sure that above write goes through */
+			wmb();
+
 			qusb_phy_enable_clocks(qphy, false);
 		} else { /* Disconnect case */
+			mutex_lock(&qphy->phy_lock);
 			/* Disable all interrupts */
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
-			/*
-			 * Phy in non-driving mode leaves Dp and Dm lines in
-			 * high-Z state. Controller power collapse is not
-			 * switching phy to non-driving mode causing charger
-			 * detection failure. Bring phy to non-driving mode by
-			 * overriding controller output via UTMI interface.
-			 */
-			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
-				OP_MODE_NON_DRIVE,
-				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
-			writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
-				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
 
+			/* Disable PHY */
+			writel_relaxed(POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
 
 			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
+			/* Do not disable power rails if there is vote for it */
+			if (!qphy->dpdm_enable)
+				qusb_phy_enable_power(qphy, false);
+			else
+				dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n");
+			mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state to true so next USB
+			 * cable connect, DPF_DMF request performs PHY
+			 * reset and put it into high-z state. For bootup
+			 * with or without USB cable, it doesn't require
+			 * to put QUSB PHY into high-z state.
+			 */
+			qphy->put_into_high_z_state = true;
 		}
 		qphy->suspended = true;
 	} else {
@@ -629,6 +742,9 @@
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 		} else {
 			qusb_phy_enable_power(qphy, true);
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+					qphy->tcsr_clamp_dig_n);
 			qusb_phy_enable_clocks(qphy, true);
 		}
 		qphy->suspended = false;
@@ -669,15 +785,61 @@
 	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
 				__func__, qphy->dpdm_enable);
 
+	mutex_lock(&qphy->phy_lock);
 	if (!qphy->dpdm_enable) {
 		ret = qusb_phy_enable_power(qphy, true);
 		if (ret < 0) {
 			dev_dbg(qphy->phy.dev,
 				"dpdm regulator enable failed:%d\n", ret);
+			mutex_unlock(&qphy->phy_lock);
 			return ret;
 		}
 		qphy->dpdm_enable = true;
+		if (qphy->put_into_high_z_state) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x1,
+				qphy->tcsr_clamp_dig_n);
+
+			qusb_phy_gdsc(qphy, true);
+			qusb_phy_enable_clocks(qphy, true);
+
+			dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n");
+			ret = reset_control_assert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "phyassert failed\n");
+			usleep_range(100, 150);
+			ret = reset_control_deassert(qphy->phy_reset);
+			if (ret)
+				dev_err(qphy->phy.dev, "deassert failed\n");
+
+			/*
+			 * Phy in non-driving mode leaves Dp and Dm
+			 * lines in high-Z state. Controller power
+			 * collapse is not switching phy to non-driving
+			 * mode causing charger detection failure. Bring
+			 * phy to non-driving mode by overriding
+			 * controller output via UTMI interface.
+			 */
+			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+				OP_MODE_NON_DRIVE,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+			writel_relaxed(UTMI_ULPI_SEL |
+				UTMI_TEST_MUX_SEL,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+
+			/* Disable PHY */
+			writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+					POWER_DOWN,
+					qphy->base + QUSB2PHY_PORT_POWERDOWN);
+			/* Make sure that above write is completed */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_gdsc(qphy, false);
+		}
 	}
+	mutex_unlock(&qphy->phy_lock);
 
 	return ret;
 }
@@ -690,19 +852,25 @@
 	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
 				__func__, qphy->dpdm_enable);
 
+	mutex_lock(&qphy->phy_lock);
 	if (qphy->dpdm_enable) {
 		if (!qphy->cable_connected) {
+			if (qphy->tcsr_clamp_dig_n)
+				writel_relaxed(0x0,
+					qphy->tcsr_clamp_dig_n);
 			dev_dbg(qphy->phy.dev, "turn off for HVDCP case\n");
 			ret = qusb_phy_enable_power(qphy, false);
 			if (ret < 0) {
 				dev_dbg(qphy->phy.dev,
 					"dpdm regulator disable failed:%d\n",
 					ret);
+				mutex_unlock(&qphy->phy_lock);
 				return ret;
 			}
 		}
 		qphy->dpdm_enable = false;
 	}
+	mutex_unlock(&qphy->phy_lock);
 
 	return ret;
 }
@@ -794,6 +962,9 @@
 						"qcom,tune2-efuse-num-bits",
 						&qphy->tune2_efuse_num_of_bits);
 			}
+			of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-correction",
+						&qphy->tune2_efuse_correction);
 
 			if (ret) {
 				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
@@ -829,6 +1000,17 @@
 		}
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"tcsr_clamp_dig_n_1p8");
+	if (res) {
+		qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
+			dev_err(dev, "err reading tcsr_clamp_dig_n\n");
+			qphy->tcsr_clamp_dig_n = NULL;
+		}
+	}
+
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(qphy->ref_clk_src))
 		dev_dbg(dev, "clk get failed for ref_clk_src\n");
@@ -847,6 +1029,34 @@
 	if (IS_ERR(qphy->phy_reset))
 		return PTR_ERR(qphy->phy_reset);
 
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "iface_clk") >= 0) {
+		qphy->iface_clk = devm_clk_get(dev, "iface_clk");
+		if (IS_ERR(qphy->iface_clk)) {
+			ret = PTR_ERR(qphy->iface_clk);
+			qphy->iface_clk = NULL;
+		if (ret == -EPROBE_DEFER)
+			return ret;
+			dev_err(dev, "couldn't get iface_clk(%d)\n", ret);
+		}
+	}
+
+	if (of_property_match_string(dev->of_node,
+		"clock-names", "core_clk") >= 0) {
+		qphy->core_clk = devm_clk_get(dev, "core_clk");
+		if (IS_ERR(qphy->core_clk)) {
+			ret = PTR_ERR(qphy->core_clk);
+			qphy->core_clk = NULL;
+			if (ret == -EPROBE_DEFER)
+				return ret;
+			dev_err(dev, "couldn't get core_clk(%d)\n", ret);
+		}
+	}
+
+	qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC");
+	if (IS_ERR(qphy->gdsc))
+		qphy->gdsc = NULL;
+
 	qphy->emulation = of_property_read_bool(dev->of_node,
 					"qcom,emulation");
 
@@ -981,6 +1191,7 @@
 		return PTR_ERR(qphy->vdda18);
 	}
 
+	mutex_init(&qphy->phy_lock);
 	platform_set_drvdata(pdev, qphy);
 
 	qphy->phy.label			= "msm-qusb-phy";
@@ -1010,6 +1221,10 @@
 	if (ret)
 		usb_remove_phy(&qphy->phy);
 
+	/* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */
+	if (qphy->tcsr_clamp_dig_n)
+		writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
 	return ret;
 }
 
diff --git a/include/linux/input/qpnp-power-on.h b/include/linux/input/qpnp-power-on.h
index a2624ab..5944f0f 100644
--- a/include/linux/input/qpnp-power-on.h
+++ b/include/linux/input/qpnp-power-on.h
@@ -51,6 +51,7 @@
 };
 
 enum pon_restart_reason {
+	/* 0 ~ 31 for common defined features */
 	PON_RESTART_REASON_UNKNOWN		= 0x00,
 	PON_RESTART_REASON_RECOVERY		= 0x01,
 	PON_RESTART_REASON_BOOTLOADER		= 0x02,
@@ -58,6 +59,10 @@
 	PON_RESTART_REASON_DMVERITY_CORRUPTED	= 0x04,
 	PON_RESTART_REASON_DMVERITY_ENFORCE	= 0x05,
 	PON_RESTART_REASON_KEYS_CLEAR		= 0x06,
+
+	/* 32 ~ 63 for OEMs/ODMs specific features */
+	PON_RESTART_REASON_OEM_MIN		= 0x20,
+	PON_RESTART_REASON_OEM_MAX		= 0x3f,
 };
 
 #ifdef CONFIG_INPUT_QPNP_POWER_ON
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 405aed5..46ee6da 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -117,6 +117,19 @@
 };
 
 /**
+* enum ipa_vlan_ifaces - vlan interfaces types
+* @IPA_VLAN_IF_EMAC: used for EMAC ethernet device
+* @IPA_VLAN_IF_RNDIS: used for RNDIS USB device
+* @IPA_VLAN_IF_ECM: used for ECM USB device
+*/
+enum ipa_vlan_ifaces {
+	IPA_VLAN_IF_EMAC,
+	IPA_VLAN_IF_RNDIS,
+	IPA_VLAN_IF_ECM,
+	IPA_VLAN_IF_MAX
+};
+
+/**
  * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
  * @nat_en:	This defines the default NAT mode for the pipe: in case of
  *		filter miss - the default NAT mode defines the NATing operation
@@ -1585,10 +1598,18 @@
  * Returns: 0 on success, negative on failure
  */
 int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
-
 int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
 	struct ipa_smmu_out_params *out);
-
+/**
+ * ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode
+ * @iface - type of vlan capable device
+ * @res - query result: true for vlan mode, false for non vlan mode
+ *
+ * API must be called after ipa_is_ready() returns true, otherwise it will fail
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
 #else /* (CONFIG_IPA || CONFIG_IPA3) */
 
 /*
@@ -2382,6 +2403,11 @@
 {
 	return -EPERM;
 }
+
+static inline int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	return -EPERM;
+}
 #endif /* (CONFIG_IPA || CONFIG_IPA3) */
 
 #endif /* _IPA_H_ */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f56e98..b2eb2d0 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1120,12 +1120,12 @@
 int msm_ep_config(struct usb_ep *ep);
 int msm_ep_unconfig(struct usb_ep *ep);
 void dwc3_tx_fifo_resize_request(struct usb_ep *ep, bool qdss_enable);
-int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr, u32 size,
+int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr, u32 size,
 	u8 dst_pipe_idx);
 bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget);
 int msm_dwc3_reset_dbm_ep(struct usb_ep *ep);
 #else
-static inline int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+static inline int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr,
 	u32 size, u8 dst_pipe_idx)
 {	return -ENODEV; }
 
diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h
index 1b0ca4a..84d7549 100644
--- a/include/linux/usb_bam.h
+++ b/include/linux/usb_bam.h
@@ -245,10 +245,13 @@
  *
  * @bam_pipe_idx - allocated pipe index.
  *
+ * @iova - IPA address of USB peer BAM (i.e. QDSS BAM)
+ *
  * @return 0 on success, negative value on error
  *
  */
-int usb_bam_connect(enum usb_ctrl bam_type, int idx, u32 *bam_pipe_idx);
+int usb_bam_connect(enum usb_ctrl bam_type, int idx, u32 *bam_pipe_idx,
+						unsigned long iova);
 
 /**
  * Connect USB-to-IPA SPS connection.
@@ -430,12 +433,14 @@
 
 /* Frees memory for data fifo and descriptor fifos. */
 int usb_bam_free_fifos(enum usb_ctrl cur_bam, u8 idx);
-
+int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+			phys_addr_t *p_addr, u32 *bam_size);
 bool msm_bam_hsic_lpm_ok(void);
 bool msm_bam_hsic_host_pipe_empty(void);
 bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
 #else
-static inline int usb_bam_connect(enum usb_ctrl bam, u8 idx, u32 *bam_pipe_idx)
+static inline int usb_bam_connect(enum usb_ctrl bam, u8 idx, u32 *bam_pipe_idx,
+							unsigned long iova)
 {
 	return -ENODEV;
 }
@@ -529,6 +534,11 @@
 	return false;
 }
 
+static int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx,
+				phys_addr_t *p_addr, u32 *bam_size)
+{
+	return false;
+}
 static inline bool msm_bam_hsic_lpm_ok(void) { return true; }
 static inline bool msm_bam_hsic_host_pipe_empty(void) { return true; }
 static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index d3b9a33..ef07f78 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -819,13 +819,17 @@
  * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
  * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
  * IPA_HDR_L2_802_3: L2 header of type 802_3
+ * IPA_HDR_L2_802_1Q: L2 header of type 802_1Q
  */
 enum ipa_hdr_l2_type {
 	IPA_HDR_L2_NONE,
 	IPA_HDR_L2_ETHERNET_II,
 	IPA_HDR_L2_802_3,
+	IPA_HDR_L2_802_1Q,
 };
-#define IPA_HDR_L2_MAX (IPA_HDR_L2_802_3 + 1)
+#define IPA_HDR_L2_MAX (IPA_HDR_L2_802_1Q + 1)
+
+#define IPA_HDR_L2_802_1Q IPA_HDR_L2_802_1Q
 
 /**
  * enum ipa_hdr_l2_type - Processing context type
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index 9a767dd..cabf0a8 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -17,6 +17,9 @@
 #define CAM_SD_SHUTDOWN                         (CAM_COMMON_OPCODE_BASE + 0x7)
 #define CAM_COMMON_OPCODE_MAX                   (CAM_COMMON_OPCODE_BASE + 0x8)
 
+#define CAM_EXT_OPCODE_BASE                     0x200
+#define CAM_CONFIG_DEV_EXTERNAL                 (CAM_EXT_OPCODE_BASE + 0x1)
+
 /* camera handle type */
 #define CAM_HANDLE_USER_POINTER                 1
 #define CAM_HANDLE_MEM_HANDLE                   2
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 6846b8f..3f1facd 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -424,6 +424,7 @@
  */
 struct cam_req_mgr_message {
 	int32_t session_hdl;
+	int32_t reserved;
 	union {
 		struct cam_req_mgr_error_msg err_msg;
 		struct cam_req_mgr_frame_msg frame_msg;