Merge "drm/msm/sde: add connector support for power modes" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index b381bdeb..c467327 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -10,6 +10,10 @@
 
 - compatible: "qcom,wil6210"
 - qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
+- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
+- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
+- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
+- qcom,smmu-mapping: specifies the base address and size of SMMU space
 - qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
   the below optional properties:
@@ -33,6 +37,10 @@
 	wil6210: qcom,wil6210 {
 		compatible = "qcom,wil6210";
 		qcom,smmu-support;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
 		qcom,msm-bus,name = "wil6210";
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
new file mode 100644
index 0000000..ca584e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -0,0 +1,73 @@
+Qualcomm Technologies, Inc. SMB1355 Charger Specific Bindings
+
+The SMB1355 slave charger is paired with the QTI family of standalone chargers
+to enable a high-current, low-profile Li+ battery charging system.
+
+The device provides 28V DC withstand capability and a wide operating input
+range of 3.8V to 14.2V for standard 5V USB inputs, as well as a wide variety
+of HVDCP Travel Adapters; it is compatible with QTI's Quick Charge technology.
+
+=======================
+Required Node Structure
+=======================
+
+SMB1355 Charger must be described in two levels of device nodes.
+
+==================================
+First Level Node - SMB1355 Charger
+==================================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,smb1355".
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of SMB's revid module. This is used
+	      to identify the SMB subtype.
+
+================================================
+Second Level Nodes - SMB1355 Charger Peripherals
+================================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======
+Example
+=======
+
+smb1355_charger: qcom,smb1355-charger {
+	compatible = "qcom,smb1355";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts = <0x10 0x1 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "chg-state-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts = <0x16 0x1 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "wdog-bark";
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 702f252..28ab2dd 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -11,8 +11,7 @@
 VADC_TM node
 
 Required properties:
-- compatible : should be "qcom,qpnp-adc-tm" for thermal ADC driver.
-	     : should be "qcom,qpnp-adc-tm-hc" for thermal ADC driver using
+- compatible : should be "qcom,qpnp-adc-tm-hc" for thermal ADC driver using
 	       refreshed BTM peripheral.
 - reg : offset and length of the PMIC Aribter register map.
 - address-cells : Must be one.
@@ -156,51 +155,6 @@
 	qcom,client-adc_tm = <&pm8941_adc_tm>;
 };
 
-Example for "qcom,qpnp-adc-tm" device:
-	/* Main Node */
-	qcom,vadc@3400 {
-                        compatible = "qcom,qpnp-adc-tm";
-                        reg = <0x3400 0x100>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-                        interrupts = <0x0 0x34 0x0>,
-					<0x0 0x34 0x3>,
-					<0x0 0x34 0x4>;
-			interrupt-names = "eoc-int-en-set",
-					  "high-thr-en-set",
-					  "low-thr-en-set";
-                        qcom,adc-bit-resolution = <15>;
-                        qcom,adc-vdd-reference = <1800>;
-			qcom,adc_tm-vadc = <&pm8941_vadc>;
-
-			/* Channel Node to be registered as part of thermal sysfs */
-                        chan@b5 {
-                                label = "pa_therm1";
-				reg = <0xb5>;
-                                qcom,decimation = <0>;
-                                qcom,pre-div-channel-scaling = <0>;
-                                qcom,calibration-type = "absolute";
-                                qcom,scale-function = <2>;
-                                qcom,hw-settle-time = <0>;
-                                qcom,fast-avg-setup = <0>;
-				qcom,btm-channel-number = <0x70>;
-				qcom,thermal-node;
-                        };
-
-			/* Channel Node */
-			chan@6 {
-				label = "vbat_sns";
-				reg = <6>;
-				qcom,decimation = <0>;
-				qcom,pre-div-channel-scaling = <1>;
-				qcom,calibration-type = "absolute";
-				qcom,scale-function = <3>;
-				qcom,hw-settle-time = <0>;
-				qcom,fast-avg-setup = <0>;
-				qcom,btm-channel-number = <0x78>;
-			};
-	};
-
 Example for "qcom,qpnp-adc-tm-hc" device:
 	/* Main Node */
 	pm8998_adc_tm: vadc@3400 {
@@ -218,7 +172,7 @@
 
 			/* Channel Node to be registered as part of thermal sysfs */
                         chan@b5 {
-                                label = "pa_therm1";
+                                label = "msm_therm";
 				reg = <0xb5>;
                                 qcom,pre-div-channel-scaling = <0>;
                                 qcom,calibration-type = "absolute";
@@ -239,3 +193,21 @@
 				qcom,btm-channel-number = <0x78>;
 			};
 	};
+
+/* Example to register thermal sensor using of_thermal */
+&thermal_zones {
+	msm-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0xb5>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/Makefile b/Makefile
index 06a55b5..f834951 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 26
+SUBLEVEL = 27
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index d1712ad..c0556e4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -391,9 +391,8 @@
 
 		coresight-name = "coresight-hwevent";
 
-		clocks = <&clock_gcc RPMH_QDSS_CLK>,
-			 <&clock_gcc RPMH_QDSS_A_CLK>;
-		clock-names = "core_clk", "core_a_clk";
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
 	};
 
 	csr: csr@6001000 {
@@ -779,7 +778,7 @@
 		coresight-name = "coresight-tpdm-lpass";
 
 		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "core_clk";
+		clock-names = "apb_pclk";
 
 		port {
 			tpdm_lpass_out_funnel_lpass: endpoint {
@@ -1104,7 +1103,8 @@
 	};
 
 	tpdm_turing: tpdm@6860000 {
-		compatible = "qcom,coresight-tpdm";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
 		reg = <0x6860000 0x1000>;
 		reg-names = "tpdm-base";
 
@@ -1389,7 +1389,7 @@
 
 	cti_ddr0: cti@69e1000 {
 		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b969>;
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69e1000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1401,7 +1401,7 @@
 
 	cti_ddr1: cti@69e4000 {
 		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b969>;
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69e4000 0x1000>;
 		reg-names = "cti-base";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 715b566..d316d63 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -12,6 +12,7 @@
 
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm845-camera-sensor-mtp.dtsi"
+#include "smb1355.dtsi"
 
 / {
 	bluetooth: bt_wcn3990 {
@@ -203,6 +204,10 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&smb1355_charger {
+	status = "ok";
+};
+
 / {
 aliases {
 		serial0 = &qupv3_se9_2uart;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 3ab0c70..947262fb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -458,6 +458,58 @@
 			};
 		};
 
+		sde_dp_aux_active: sde_dp_aux_active {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-disable = <0>; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		sde_dp_aux_suspend: sde_dp_aux_suspend {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		sde_dp_usbplug_cc_active: sde_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		sde_dp_usbplug_cc_suspend: sde_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index a4dc4753..e21ed36 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#include "smb1355.dtsi"
+
 /{
 	qrd_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
@@ -22,6 +24,10 @@
 	qcom,battery-data = <&qrd_batterydata>;
 };
 
+&smb1355_charger {
+	status = "ok";
+};
+
 &mdss_mdp {
 	#cooling-cells = <2>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index af63d22..9c497fa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -117,8 +117,6 @@
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x2>;
 		qcom,sde-ubwc-version = <0x200>;
-		qcom,sde-ubwc-static = <0x100>;
-		qcom,sde-ubwc-swizzle = <1>;
 		qcom,sde-panic-per-pipe;
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
@@ -219,8 +217,6 @@
 
 		qcom,mdss-rot-mode = <1>;
 		qcom,mdss-highest-bank-bit = <0x2>;
-		qcom,sde-ubwc-malsize = <1>;
-		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 72c2efa..b8018b6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -821,6 +821,13 @@
 		interrupts = <1 5 4>;
 	};
 
+	clock_rpmh: qcom,rpmhclk {
+		compatible = "qcom,rpmh-clk-sdm845";
+		#clock-cells = <1>;
+		mboxes = <&apps_rsc 0>;
+		mbox-names = "apps";
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdm845", "syscon";
 		reg = <0x100000 0x1f0000>;
@@ -995,13 +1002,6 @@
 		#reset-cells = <1>;
 	};
 
-	clock_rpmh: qcom,rpmhclk {
-		compatible = "qcom,rpmh-clk-sdm845";
-		#clock-cells = <1>;
-		mboxes = <&apps_rsc 0>;
-		mbox-names = "apps";
-	};
-
 	clock_debug: qcom,cc-debug@100000 {
 		compatible = "qcom,debugcc-sdm845";
 		qcom,cc-count = <5>;
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
new file mode 100644
index 0000000..33c5e97
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -0,0 +1,55 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&qupv3_se10_i2c {
+	smb1355: qcom,smb1355@8 {
+		compatible = "qcom,i2c-pmic";
+		reg = <0x8>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "smb1355";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+
+		smb1355_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		smb1355_charger: qcom,smb1355-charger@1000 {
+			compatible = "qcom,smb1355";
+			qcom,pmic-revid = <&smb1355_revid>;
+			reg = <0x1000 0x700>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&smb1355>;
+			status = "disabled";
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "chg-state-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "wdog-bark";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index f989858..531d236 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -300,8 +300,8 @@
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_SMB2=y
-CONFIG_SMB138X_CHARGER=y
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7136ca8..cbcf515 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -309,8 +309,8 @@
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_SMB2=y
-CONFIG_SMB138X_CHARGER=y
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149c..d0ac2d5 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -489,8 +489,7 @@
 int tpm_get_timeouts(struct tpm_chip *chip)
 {
 	struct tpm_cmd_t tpm_cmd;
-	unsigned long new_timeout[4];
-	unsigned long old_timeout[4];
+	unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
 	struct duration_t *duration_cap;
 	ssize_t rc;
 
@@ -542,11 +541,15 @@
 	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
 		return -EINVAL;
 
-	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
-	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
-	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
-	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
-	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+	timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+	timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+	timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+	timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+	timeout_chip[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+	timeout_chip[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+	timeout_chip[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+	timeout_chip[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+	memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
 
 	/*
 	 * Provide ability for vendor overrides of timeout values in case
@@ -554,16 +557,24 @@
 	 */
 	if (chip->ops->update_timeouts != NULL)
 		chip->timeout_adjusted =
-			chip->ops->update_timeouts(chip, new_timeout);
+			chip->ops->update_timeouts(chip, timeout_eff);
 
 	if (!chip->timeout_adjusted) {
-		/* Don't overwrite default if value is 0 */
-		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
-			int i;
+		/* Restore default if chip reported 0 */
+		int i;
 
+		for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+			if (timeout_eff[i])
+				continue;
+
+			timeout_eff[i] = timeout_old[i];
+			chip->timeout_adjusted = true;
+		}
+
+		if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
 			/* timeouts in msec rather usec */
-			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
-				new_timeout[i] *= 1000;
+			for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+				timeout_eff[i] *= 1000;
 			chip->timeout_adjusted = true;
 		}
 	}
@@ -572,16 +583,16 @@
 	if (chip->timeout_adjusted) {
 		dev_info(&chip->dev,
 			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
-			 old_timeout[0], new_timeout[0],
-			 old_timeout[1], new_timeout[1],
-			 old_timeout[2], new_timeout[2],
-			 old_timeout[3], new_timeout[3]);
+			 timeout_chip[0], timeout_eff[0],
+			 timeout_chip[1], timeout_eff[1],
+			 timeout_chip[2], timeout_eff[2],
+			 timeout_chip[3], timeout_eff[3]);
 	}
 
-	chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
-	chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
-	chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
-	chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
+	chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+	chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+	chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+	chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
 
 duration:
 	tpm_cmd.header.in = tpm_getcap_header;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 2f9cfdf..3d101ac 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1064,17 +1064,10 @@
 }
 
 static int clk_dp_determine_rate(struct clk_hw *hw,
-				struct clk_rate_request *req)
+		struct clk_rate_request *req)
 {
-	if (!hw)
-		return -EINVAL;
-
-	if (!clk_hw_get_parent(hw)) {
-		pr_err("Missing the parent for the DP RCG\n");
-		return -EINVAL;
-	}
-
-	req->best_parent_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+	req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
+							req->best_parent_rate);
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 3b56fa1..d3a28e6 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -80,8 +80,8 @@
 
 static const char * const disp_cc_parent_names_1[] = {
 	"bi_tcxo",
-	"dp_phy_pll_link_clk",
-	"dp_phy_pll_vco_div_clk",
+	"dp_link_clk_divsel_ten",
+	"dp_vco_divided_clk_src_mux",
 	"core_bi_pll_test_se",
 };
 
@@ -217,12 +217,11 @@
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
-	F( 108000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 180000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 360000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 540000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 108000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 180000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 360000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
 	{ }
 };
 
@@ -236,23 +235,22 @@
 		.name = "disp_cc_mdss_dp_crypto_clk_src",
 		.parent_names = disp_cc_parent_names_1,
 		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 12800000,
-			LOWER, 108000000,
-			LOW, 180000000,
-			LOW_L1, 360000000,
-			NOMINAL, 540000000),
+			MIN, 12800,
+			LOWER, 108000,
+			LOW, 180000,
+			LOW_L1, 360000,
+			NOMINAL, 540000),
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
-	F_SLEW( 162000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  324000000),
-	F_SLEW( 270000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  540000000),
-	F_SLEW( 540000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1080000000),
-	F_SLEW( 810000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1620000000),
+	F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 810000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
 	{ }
 };
 
@@ -269,11 +267,11 @@
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 19200000,
-			LOWER, 162000000,
-			LOW, 270000000,
-			LOW_L1, 540000000,
-			NOMINAL, 810000000),
+			MIN, 19200,
+			LOWER, 162000,
+			LOW, 270000,
+			LOW_L1, 540000,
+			NOMINAL, 810000),
 	},
 };
 
@@ -284,17 +282,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel1_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -305,17 +301,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -664,23 +658,7 @@
 	},
 };
 
-static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
-	.reg = 0x2150,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_link_div_clk_src",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
 static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
 	.halt_reg = 0x2044,
 	.halt_check = BRANCH_HALT,
@@ -690,10 +668,10 @@
 		.hw.init = &(struct clk_init_data){
 			.name = "disp_cc_mdss_dp_link_intf_clk",
 			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_div_clk_src",
+				"disp_cc_mdss_dp_link_clk_src",
 			},
 			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.flags = CLK_GET_RATE_NOCACHE,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -955,8 +933,6 @@
 					&disp_cc_mdss_dp_crypto_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
-	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
-					&disp_cc_mdss_dp_link_div_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 9a71ea0..5604bf1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -259,15 +259,11 @@
 			       dsi_ctrl->cell_index, op_state);
 			rc = -EINVAL;
 		} else if (state->power_state == DSI_CTRL_POWER_VREG_ON) {
-			if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->controller_state == DSI_CTRL_ENGINE_ON)) {
-				pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+			if (state->vid_engine_state == DSI_CTRL_ENGINE_ON) {
+				pr_debug("[%d]State error: op=%d: %d\n",
 				       dsi_ctrl->cell_index,
 				       op_state,
-				       state->cmd_engine_state,
-				       state->vid_engine_state,
-				       state->controller_state);
+				       state->vid_engine_state);
 				rc = -EINVAL;
 			}
 		}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 86db16e..3402d48 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -3113,6 +3113,11 @@
 		pr_err("[%s] panel post-enable failed, rc=%d\n",
 		       display->name, rc);
 
+	/* remove the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
@@ -3128,6 +3133,11 @@
 
 	mutex_lock(&display->display_lock);
 
+	/* enable the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+
 	rc = dsi_panel_pre_disable(display->panel);
 	if (rc)
 		pr_err("[%s] panel pre-disable failed, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index ebfb40b8..a1a0e57 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -791,23 +791,12 @@
 		   bool skip_validation)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy || !config) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 
 	if (!skip_validation)
@@ -839,10 +828,6 @@
 error:
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
 	return rc;
 }
 
@@ -855,34 +840,17 @@
 int dsi_phy_disable(struct msm_dsi_phy *phy)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 	dsi_phy_disable_hw(phy);
 	phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
-
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8487f46..3c3f335 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -105,9 +105,13 @@
 
 static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 {
-	struct circ_buf *fifo = &rd->fifo;
+	struct circ_buf *fifo;
 	const char *ptr = buf;
 
+	if (!rd || !buf)
+		return;
+
+	fifo = &rd->fifo;
 	while (sz > 0) {
 		char *fptr = &fifo->buf[fifo->head];
 		int n;
@@ -136,11 +140,18 @@
 static ssize_t rd_read(struct file *file, char __user *buf,
 		size_t sz, loff_t *ppos)
 {
-	struct msm_rd_state *rd = file->private_data;
-	struct circ_buf *fifo = &rd->fifo;
-	const char *fptr = &fifo->buf[fifo->tail];
+	struct msm_rd_state *rd;
+	struct circ_buf *fifo;
+	const char *fptr;
 	int n = 0, ret = 0;
 
+	if (!file || !file->private_data || !buf || !ppos)
+		return -EINVAL;
+
+	rd = file->private_data;
+	fifo = &rd->fifo;
+	fptr = &fifo->buf[fifo->tail];
+
 	mutex_lock(&rd->read_lock);
 
 	ret = wait_event_interruptible(rd->fifo_event,
@@ -168,19 +179,34 @@
 
 static int rd_open(struct inode *inode, struct file *file)
 {
-	struct msm_rd_state *rd = inode->i_private;
-	struct drm_device *dev = rd->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
+	struct msm_rd_state *rd;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_gpu *gpu;
 	uint64_t val;
 	uint32_t gpu_id;
 	int ret = 0;
 
+	if (!file || !inode || !inode->i_private)
+		return -EINVAL;
+
+	rd = inode->i_private;
+	dev = rd->dev;
+
+	if (!dev || !dev->dev_private)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	gpu = priv->gpu;
+
 	mutex_lock(&dev->struct_mutex);
 
 	if (rd->open || !gpu) {
 		ret = -EBUSY;
 		goto out;
+	} else if (!gpu->funcs || !gpu->funcs->get_param) {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	file->private_data = rd;
@@ -201,7 +227,12 @@
 
 static int rd_release(struct inode *inode, struct file *file)
 {
-	struct msm_rd_state *rd = inode->i_private;
+	struct msm_rd_state *rd;
+
+	if (!inode || !inode->i_private)
+		return -EINVAL;
+
+	rd = inode->i_private;
 	rd->open = false;
 	return 0;
 }
@@ -217,9 +248,14 @@
 
 int msm_rd_debugfs_init(struct drm_minor *minor)
 {
-	struct msm_drm_private *priv = minor->dev->dev_private;
+	struct msm_drm_private *priv;
 	struct msm_rd_state *rd;
 
+	if (!minor || !minor->dev || !minor->dev->dev_private)
+		return -EINVAL;
+
+	priv = minor->dev->dev_private;
+
 	/* only create on first minor: */
 	if (priv->rd)
 		return 0;
@@ -265,8 +301,14 @@
 
 void msm_rd_debugfs_cleanup(struct drm_minor *minor)
 {
-	struct msm_drm_private *priv = minor->dev->dev_private;
-	struct msm_rd_state *rd = priv->rd;
+	struct msm_drm_private *priv;
+	struct msm_rd_state *rd;
+
+	if (!minor || !minor->dev || !minor->dev->dev_private)
+		return;
+
+	priv = minor->dev->dev_private;
+	rd = priv->rd;
 
 	if (!rd)
 		return;
@@ -315,13 +357,20 @@
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_gem_submit *submit)
 {
-	struct drm_device *dev = submit->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_rd_state *rd = priv->rd;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_rd_state *rd;
 	char msg[128];
 	int i, n;
 
-	if (!rd->open)
+	if (!submit || !submit->dev || !submit->dev->dev_private)
+		return;
+
+	dev = submit->dev;
+	priv = dev->dev_private;
+	rd = priv->rd;
+
+	if (!rd || !rd->open)
 		return;
 
 	/* writing into fifo is serialized by caller, and
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 256d132..1bd7654 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -864,55 +864,82 @@
 	lm_bounds = &crtc_state->lm_bounds[lm_idx];
 	lm_roi = &crtc_state->lm_roi[lm_idx];
 
-	if (!sde_kms_rect_is_null(crtc_roi)) {
-		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
-		if (sde_kms_rect_is_null(lm_roi)) {
-			SDE_ERROR("unsupported R/L only partial update\n");
-			return -EINVAL;
-		}
-	} else {
+	if (sde_kms_rect_is_null(crtc_roi))
 		memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
-	}
+	else
+		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
 
 	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
 			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
 
+	/* if any dimension is zero, clear all dimensions for clarity */
+	if (sde_kms_rect_is_null(lm_roi))
+		memset(lm_roi, 0, sizeof(*lm_roi));
+
 	return 0;
 }
 
+static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *crtc_state;
+	u32 disp_bitmask = 0;
+	int i;
+
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+			disp_bitmask |= BIT(i);
+	}
+
+	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
+
+	return disp_bitmask;
+}
+
 static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *roi_prv, *roi_cur;
-	int lm_idx;
+	const struct sde_rect *roi[CRTC_DUAL_MIXERS];
 
 	if (!crtc || !state)
 		return -EINVAL;
 
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("%s: unsupported number of mixers: %d\n",
+				sde_crtc->name, sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+
 	/*
 	 * On certain HW, ROIs must be centered on the split between LMs,
 	 * and be of equal width.
 	 */
+	roi[0] = &crtc_state->lm_roi[0];
+	roi[1] = &crtc_state->lm_roi[1];
 
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
+	/* if one of the ROIs is null, it's a left/right-only update */
+	if (sde_kms_rect_is_null(roi[0]) || sde_kms_rect_is_null(roi[1]))
+		return 0;
 
-	roi_prv = &crtc_state->lm_roi[0];
-	for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-		roi_cur = &crtc_state->lm_roi[lm_idx];
-
-		/* check lm rois are equal width & first roi ends at 2nd roi */
-		if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
-				(roi_prv->w != roi_cur->w)) {
-			SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
-					sde_crtc->name,
-					lm_idx-1, roi_prv->x, roi_prv->w,
-					lm_idx, roi_cur->x, roi_cur->w);
-			return -EINVAL;
-		}
-		roi_prv = roi_cur;
+	/* check lm rois are equal width & first roi ends at 2nd roi */
+	if (roi[0]->x + roi[0]->w != roi[1]->x || roi[0]->w != roi[1]->w) {
+		SDE_ERROR(
+			"%s: rois not centered and symmetric: roi0 x %d w %d roi1 x %d w %d\n",
+				sde_crtc->name, roi[0]->x, roi[0]->w,
+				roi[1]->x, roi[1]->w);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -1188,13 +1215,21 @@
  */
 static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_crtc_state;
+	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_mixer *lm;
 
 	int i;
 
+	if (!crtc)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_crtc_state = to_sde_crtc_state(crtc->state);
+	mixer = sde_crtc->mixers;
+
 	SDE_DEBUG("%s\n", sde_crtc->name);
 
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
@@ -1225,9 +1260,19 @@
 	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
+
 		ctl = mixer[i].hw_ctl;
 		lm = mixer[i].hw_lm;
 
+		if (sde_kms_rect_is_null(lm_roi)) {
+			SDE_DEBUG(
+				"%s: lm%d leave ctl%d mask 0 since null roi\n",
+					sde_crtc->name, lm->idx - LM_0,
+					ctl->idx - CTL_0);
+			continue;
+		}
+
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
 
 		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
@@ -1720,9 +1765,9 @@
 		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
 	struct drm_device *dev;
 	unsigned long flags;
-	u32 i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1753,12 +1798,12 @@
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
-	/* Reset flush mask from previous commit */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
 
-		if (ctl)
-			ctl->ops.clear_pending_flush(ctl);
+		/* encoder will trigger pending mask now */
+		sde_encoder_trigger_kickoff_pending(encoder);
 	}
 
 	/*
@@ -1920,6 +1965,8 @@
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
 		params.inline_rotate_prefill = cstate->sbuf_prefill_line;
+		params.affected_displays = _sde_crtc_get_displays_affected(crtc,
+				crtc->state);
 		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
@@ -2151,21 +2198,62 @@
 	return 0;
 }
 
+static void sde_crtc_handle_power_event(u32 event_type, void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	SDE_EVT32(DRMID(crtc), event_type);
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
+		/* restore encoder; crtc will be programmed during commit */
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+
+			sde_encoder_virt_restore(encoder);
+		}
+
+	} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
+		struct drm_plane *plane;
+
+		/*
+		 * set revalidate flag in planes, so it will be re-programmed
+		 * in the next frame update
+		 */
+		drm_atomic_crtc_for_each_plane(plane, crtc)
+			sde_plane_set_revalidate(plane, true);
+	}
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc || !crtc->dev || !crtc->state) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
@@ -2205,6 +2293,10 @@
 		cstate->rsc_update = false;
 	}
 
+	if (sde_crtc->power_event)
+		sde_power_handle_unregister_event(&priv->phandle,
+				sde_crtc->power_event);
+
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 
@@ -2226,14 +2318,16 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32(DRMID(crtc));
@@ -2256,6 +2350,11 @@
 				sde_crtc->name, node->event);
 	}
 	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	sde_crtc->power_event = sde_power_handle_register_event(
+		&priv->phandle,
+		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+		sde_crtc_handle_power_event, crtc, sde_crtc->name);
 }
 
 struct plane_state {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 98ba711..ec5ec1d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -138,6 +138,7 @@
  * @event_free_list : List of available event structures
  * @event_lock    : Spinlock around event handling code
  * @misr_enable   : boolean entry indicates misr enable/disable status.
+ * @power_event   : registered power event handle
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -187,6 +188,8 @@
 	struct list_head event_free_list;
 	spinlock_t event_lock;
 	bool misr_enable;
+
+	struct sde_power_event *power_event;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -248,10 +251,10 @@
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
- * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
- *                  Origin top left of CRTC.
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
  * @lm_roi        : Current LM ROI, possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @user_roi_list : List of user's requested ROIs as from set property
@@ -274,8 +277,8 @@
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 
-	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect crtc_roi;
+	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
 	struct msm_roi_list user_roi_list;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 3357642..a136645 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1169,34 +1169,79 @@
 	}
 }
 
-static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
-	int i = 0;
 	int ret = 0;
 
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
 		return;
-	} else if (!drm_enc->dev) {
-		SDE_ERROR("invalid dev\n");
-		return;
-	} else if (!drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid dev_private\n");
+	}
+	priv = drm_enc->dev->dev_private;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc || !sde_enc->cur_master) {
+		SDE_ERROR("invalid sde encoder/master\n");
 		return;
 	}
 
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
-
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
 		return;
 	}
 
+	if (sde_enc->cur_master->hw_mdptop &&
+			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+				sde_enc->cur_master->hw_mdptop,
+				sde_kms->catalog);
+
+	if (_sde_is_dsc_enabled(sde_enc)) {
+		ret = _sde_encoder_dsc_setup(sde_enc);
+		if (ret)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
+	}
+}
+
+void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && (phys != sde_enc->cur_master) && phys->ops.restore)
+			phys->ops.restore(phys);
+	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->ops.restore)
+		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
+
+	_sde_encoder_virt_enable_helper(drm_enc);
+}
+
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
 
@@ -1230,21 +1275,10 @@
 			phys->ops.enable(phys);
 	}
 
-	if (sde_enc->cur_master && sde_enc->cur_master->ops.enable)
+	if (sde_enc->cur_master->ops.enable)
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
-	if (sde_enc->cur_master && sde_enc->cur_master->hw_mdptop &&
-			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
-		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
-				sde_enc->cur_master->hw_mdptop,
-				sde_kms->catalog);
-
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n",
-					ret);
-	}
+	_sde_encoder_virt_enable_helper(drm_enc);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1463,6 +1497,14 @@
 		return;
 	}
 
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip flush pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
+
 	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
@@ -1484,11 +1526,21 @@
  */
 static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
 {
+	struct sde_hw_ctl *ctl;
+
 	if (!phys) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
+	ctl = phys->hw_ctl;
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip start pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
 	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
 		phys->ops.trigger_start(phys);
 }
@@ -1620,9 +1672,13 @@
 			topology = sde_connector_get_topology_name(
 					phys->connector);
 
-		/* don't wait on ppsplit slaves, they dont register irqs */
+		/*
+		 * don't wait on ppsplit slaves or skipped encoders because
+		 * they don't receive irqs
+		 */
 		if (!(topology == SDE_RM_TOPOLOGY_PPSPLIT &&
-				phys->split_role == ENC_ROLE_SLAVE))
+				phys->split_role == ENC_ROLE_SLAVE) &&
+				phys->split_role != ENC_ROLE_SKIP)
 			set_bit(i, sde_enc->frame_busy_mask);
 
 		if (!phys->ops.needs_single_flush ||
@@ -1645,6 +1701,92 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	int i, num_active_phys;
+	bool master_assigned = false;
+
+	if (!drm_enc || !params)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (sde_enc->num_phys_encs <= 1)
+		return;
+
+	/* count bits set */
+	num_active_phys = hweight_long(params->affected_displays);
+
+	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
+			params->affected_displays, num_active_phys);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		enum sde_enc_split_role prv_role, new_role;
+		bool active;
+
+		phys = sde_enc->phys_encs[i];
+		if (!phys || !phys->ops.update_split_role)
+			continue;
+
+		active = test_bit(i, &params->affected_displays);
+		prv_role = phys->split_role;
+
+		if (active && num_active_phys == 1)
+			new_role = ENC_ROLE_SOLO;
+		else if (active && !master_assigned)
+			new_role = ENC_ROLE_MASTER;
+		else if (active)
+			new_role = ENC_ROLE_SLAVE;
+		else
+			new_role = ENC_ROLE_SKIP;
+
+		phys->ops.update_split_role(phys, new_role);
+		if (new_role == ENC_ROLE_SOLO || new_role == ENC_ROLE_MASTER) {
+			sde_enc->cur_master = phys;
+			master_assigned = true;
+		}
+
+		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active);
+	}
+}
+
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+	struct sde_hw_ctl *ctl;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->hw_ctl) {
+			ctl = phys->hw_ctl;
+			if (ctl->ops.clear_pending_flush)
+				ctl->ops.clear_pending_flush(ctl);
+
+			/* update only for command mode primary ctl */
+			if ((phys == sde_enc->cur_master) &&
+			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			    && ctl->ops.trigger_pending)
+				ctl->ops.trigger_pending(ctl);
+		}
+	}
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1654,8 +1796,8 @@
 	unsigned int i;
 	int rc;
 
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!drm_enc || !params) {
+		SDE_ERROR("invalid args\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
@@ -1678,6 +1820,7 @@
 
 	/* if any phys needs reset, reset all phys, in-order */
 	if (needs_hw_reset) {
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			phys = sde_enc->phys_encs[i];
 			if (phys && phys->ops.hw_reset)
@@ -1685,6 +1828,8 @@
 		}
 	}
 
+	_sde_encoder_update_master(drm_enc, params);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 5795e04..c5ddee6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -47,9 +47,12 @@
 /**
  * sde_encoder_kickoff_params - info encoder requires at kickoff
  * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
  */
 struct sde_encoder_kickoff_params {
 	u32 inline_rotate_prefill;
+	unsigned long affected_displays;
 };
 
 /**
@@ -101,6 +104,13 @@
 		struct sde_encoder_kickoff_params *params);
 
 /**
+ * sde_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
@@ -124,6 +134,12 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
  * enum sde_encoder_property - property tags for sde enoder
  * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_REFILL: # of prefill line, 0 to disable
  */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6942292..a3b112d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -41,11 +41,13 @@
  * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
  * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
  * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ * @ENC_ROLE_SKIP:	This encoder is not participating in kickoffs
  */
 enum sde_enc_split_role {
 	ENC_ROLE_SOLO,
 	ENC_ROLE_MASTER,
-	ENC_ROLE_SLAVE
+	ENC_ROLE_SLAVE,
+	ENC_ROLE_SKIP
 };
 
 /**
@@ -118,6 +120,8 @@
  * @hw_reset:			Issue HW recovery such as CTL reset and clear
  *				SDE_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @update_split_role:		Update the split role of the phys enc
+ * @restore:			Restore all the encoder configs.
  */
 
 struct sde_encoder_phys_ops {
@@ -152,6 +156,9 @@
 	u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
 	void (*hw_reset)(struct sde_encoder_phys *phys_enc);
 	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
+	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
+			enum sde_enc_split_role role);
+	void (*restore)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -165,6 +172,7 @@
 	INTR_IDX_VSYNC,
 	INTR_IDX_PINGPONG,
 	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
 	INTR_IDX_RDPTR,
 	INTR_IDX_MAX,
 };
@@ -198,6 +206,8 @@
  *				vs. the number of done/vblank irqs. Should hover
  *				between 0-2 Incremented when a new kickoff is
  *				scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
+ *                              pending.
  * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
  */
 struct sde_encoder_phys {
@@ -221,12 +231,14 @@
 	atomic_t vblank_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	wait_queue_head_t pending_kickoff_wq;
 };
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 {
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
 	return atomic_inc_return(&phys->pending_kickoff_cnt);
 }
 
@@ -265,7 +277,6 @@
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
-	int intf_idx;
 	int stream_sel;
 	int irq_idx[INTR_IDX_MAX];
 	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index a4f40f2..572bd9e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -21,18 +21,21 @@
 #define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
 #define PP_TIMEOUT_MAX_TRIALS	10
 
+/* wait for 2 vsyncs only */
+#define CTL_START_TIMEOUT_MS	32
+
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels In the future, may want to allow panels to override
@@ -57,6 +60,46 @@
 	return true;
 }
 
+static void _sde_encoder_phys_cmd_update_flush_mask(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.get_bitmask_intf ||
+			!ctl->ops.update_pending_flush)
+		return;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
+}
+
+static void _sde_encoder_phys_cmd_update_intf_cfg(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -130,11 +173,35 @@
 	if (!cmd_enc)
 		return;
 
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 }
 
+static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *ctl;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (!phys_enc->hw_ctl)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
 static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
 {
 	enum sde_rm_topology_name topology;
@@ -240,7 +307,7 @@
 	if (ret <= 0) {
 		/* read and clear interrupt */
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				INTR_IDX_PINGPONG, true);
+				cmd_enc->irq_idx[INTR_IDX_PINGPONG], true);
 		if (irq_status) {
 			unsigned long flags;
 			SDE_EVT32(DRMID(phys_enc->parent),
@@ -295,8 +362,13 @@
 		return -EINVAL;
 	}
 
-	idx_lookup = (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN) ?
-			cmd_enc->intf_idx : phys_enc->hw_pp->idx;
+	if (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN)
+		idx_lookup = phys_enc->intf_idx;
+	else if (intr_type == SDE_IRQ_TYPE_CTL_START)
+		idx_lookup = phys_enc->hw_ctl ? phys_enc->hw_ctl->idx : -1;
+	else
+		idx_lookup = phys_enc->hw_pp->idx;
+
 	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
 			intr_type, idx_lookup);
 	if (cmd_enc->irq_idx[idx] < 0) {
@@ -409,9 +481,13 @@
 void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 		bool enable)
 {
+	struct sde_encoder_phys_cmd *cmd_enc;
+
 	if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return;
 
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
 	if (enable) {
 		sde_encoder_phys_cmd_register_irq(phys_enc,
 				SDE_IRQ_TYPE_PING_PONG_COMP,
@@ -426,7 +502,17 @@
 				INTR_IDX_UNDERRUN,
 				sde_encoder_phys_cmd_underrun_irq,
 				"underrun");
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_CTL_START,
+				INTR_IDX_CTL_START,
+				sde_encoder_phys_cmd_ctl_start_irq,
+				"ctl_start");
 	} else {
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_CTL_START);
 		sde_encoder_phys_cmd_unregister_irq(
 				phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
@@ -513,12 +599,11 @@
 	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
 }
 
-static void sde_encoder_phys_cmd_pingpong_config(
+static void _sde_encoder_phys_cmd_pingpong_config(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
 	if (!phys_enc || !phys_enc->hw_ctl ||
 			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
@@ -530,13 +615,7 @@
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	intf_cfg.intf = cmd_enc->intf_idx;
-	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
-	intf_cfg.stream_sel = cmd_enc->stream_sel;
-	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
-
-	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
-
+	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -549,10 +628,9 @@
 	return _sde_encoder_phys_is_ppsplit(phys_enc);
 }
 
-static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+static void sde_encoder_phys_cmd_enable_helper(
+		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_hw_ctl *ctl;
 	u32 flush_mask = 0;
 
@@ -560,6 +638,25 @@
 		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
 		return;
 	}
+
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+}
+
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid phys encoder\n");
+		return;
+	}
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
 	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
@@ -567,17 +664,8 @@
 		return;
 	}
 
-	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
-
-	sde_encoder_phys_cmd_pingpong_config(phys_enc);
-
-	ctl = phys_enc->hw_ctl;
-	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
-	ctl->ops.update_pending_flush(ctl, flush_mask);
+	sde_encoder_phys_cmd_enable_helper(phys_enc);
 	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
-			ctl->idx - CTL_0, flush_mask);
 }
 
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
@@ -639,7 +727,7 @@
 		return;
 	}
 	SDE_DEBUG_CMDENC(cmd_enc, "\n");
-	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
@@ -671,24 +759,93 @@
 	}
 }
 
+static int _sde_encoder_phys_cmd_wait_for_ctl_start(
+		struct sde_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct sde_hw_ctl *ctl;
+	u32 irq_status;
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc->hw_ctl) {
+		SDE_ERROR("invalid ctl\n");
+		return -EINVAL;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rc = sde_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
+			ctl->idx - CTL_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_ctlstart_cnt,
+			CTL_START_TIMEOUT_MS);
+	if (rc <= 0) {
+		/* read and clear interrupt */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				cmd_enc->irq_idx[INTR_IDX_CTL_START], true);
+		if (irq_status) {
+			unsigned long flags;
+
+			SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"ctl:%d start done but irq not triggered\n",
+					ctl->idx - CTL_0);
+			local_irq_save(flags);
+			sde_encoder_phys_cmd_ctl_start_irq(cmd_enc,
+					INTR_IDX_CTL_START);
+			local_irq_restore(flags);
+			rc = 0;
+		} else {
+			SDE_ERROR("ctl start interrupt wait failed\n");
+			rc = -EINVAL;
+		}
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
 static int sde_encoder_phys_cmd_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
+	int rc = 0;
+	struct sde_encoder_phys_cmd *cmd_enc;
 
-	if (cmd_enc->serialize_wait4pp)
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (sde_encoder_phys_cmd_is_master(phys_enc))
+		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
 		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
 
-	/*
-	 * following statement is true serialize_wait4pp is false.
-	 *
-	 * Since ctl_start "commits" the transaction to hardware, and the
-	 * tearcheck block takes it from there, there is no need to have a
-	 * separate wait for committed, a la wait-for-vsync in video mode
-	 */
+	return rc;
+}
 
-	return 0;
+static void sde_encoder_phys_cmd_update_split_role(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_enc_split_role role)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	enum sde_enc_split_role old_role = phys_enc->split_role;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
+			old_role, role);
+
+	phys_enc->split_role = role;
+	if (role == ENC_ROLE_SKIP || role == old_role)
+		return;
+
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
+	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
 }
 
 static void sde_encoder_phys_cmd_init_ops(
@@ -708,6 +865,8 @@
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
+	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
+	ops->restore = sde_encoder_phys_cmd_enable_helper;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -735,8 +894,6 @@
 		goto fail_mdp_init;
 	}
 	phys_enc->hw_mdptop = hw_mdp;
-
-	cmd_enc->intf_idx = p->intf_idx;
 	phys_enc->intf_idx = p->intf_idx;
 
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -753,6 +910,7 @@
 		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 5f257bb..826fe14 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -139,10 +139,9 @@
 	struct sde_fence *f = to_sde_fence(fence);
 	struct sde_fence *fc, *next;
 	struct sde_fence_context *ctx = f->ctx;
-	unsigned long flags;
 	bool release_kref = false;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->list_lock);
 	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
 				 fence_list) {
 		/* fence release called before signal */
@@ -152,7 +151,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock(&ctx->list_lock);
 
 	/* keep kput outside spin_lock because it may release ctx */
 	if (release_kref)
@@ -198,7 +197,6 @@
 	struct sync_file *sync_file;
 	signed int fd = -EINVAL;
 	struct sde_fence_context *ctx = fence_ctx;
-	unsigned long flags;
 
 	if (!ctx) {
 		SDE_ERROR("invalid context\n");
@@ -234,12 +232,12 @@
 
 	fd_install(fd, sync_file->file);
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->list_lock);
 	sde_fence->ctx = fence_ctx;
 	sde_fence->fd = fd;
 	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
 	kref_get(&ctx->kref);
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock(&ctx->list_lock);
 exit:
 	return fd;
 }
@@ -260,6 +258,7 @@
 	ctx->context = fence_context_alloc(1);
 
 	spin_lock_init(&ctx->lock);
+	spin_lock_init(&ctx->list_lock);
 	INIT_LIST_HEAD(&ctx->fence_list_head);
 
 	return 0;
@@ -333,7 +332,8 @@
 {
 	unsigned long flags;
 	struct sde_fence *fc, *next;
-	uint32_t count = 0;
+	bool is_signaled = false;
+	struct list_head local_list_head;
 
 	if (!ctx) {
 		SDE_ERROR("invalid ctx, %pK\n", ctx);
@@ -342,37 +342,45 @@
 		return;
 	}
 
+	INIT_LIST_HEAD(&local_list_head);
+
 	spin_lock_irqsave(&ctx->lock, flags);
 	if ((int)(ctx->done_count - ctx->commit_count) < 0) {
 		++ctx->done_count;
+		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
+					ctx->done_count, ctx->commit_count);
 	} else {
 		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
 					ctx->done_count, ctx->commit_count);
-		goto end;
+		spin_unlock_irqrestore(&ctx->lock, flags);
+		return;
 	}
+	spin_unlock_irqrestore(&ctx->lock, flags);
 
+	spin_lock(&ctx->list_lock);
 	if (list_empty(&ctx->fence_list_head)) {
 		SDE_DEBUG("nothing to trigger!-no get_prop call\n");
-		goto end;
+		spin_unlock(&ctx->list_lock);
+		return;
 	}
 
-	SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
-					ctx->commit_count, ctx->done_count);
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list)
+		list_move(&fc->fence_list, &local_list_head);
+	spin_unlock(&ctx->list_lock);
 
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
-				 fence_list) {
-		if (fence_is_signaled_locked(&fc->base)) {
-			list_del_init(&fc->fence_list);
-			count++;
+	list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
+		spin_lock_irqsave(&ctx->lock, flags);
+		is_signaled = fence_signal_locked(&fc->base);
+		spin_unlock_irqrestore(&ctx->lock, flags);
+
+		if (is_signaled) {
+			kref_put(&ctx->kref, sde_fence_destroy);
+		} else {
+			spin_lock(&ctx->list_lock);
+			list_move(&fc->fence_list, &ctx->fence_list_head);
+			spin_unlock(&ctx->list_lock);
 		}
 	}
 
 	SDE_EVT32(ctx->drm_id, ctx->done_count);
-
-end:
-	spin_unlock_irqrestore(&ctx->lock, flags);
-
-	/* keep this outside spin_lock because same ctx may be released */
-	for (; count > 0; count--)
-		kref_put(&ctx->kref, sde_fence_destroy);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index f3f8b35..207f29c 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -29,7 +29,8 @@
  * @done_count: Number of completed commits since bootup
  * @drm_id: ID number of owning DRM Object
  * @ref: kref counter on timeline
- * @lock: spinlock for timeline and fence counter protection
+ * @lock: spinlock for fence counter protection
+ * @list_lock: spinlock for timeline protection
  * @context: fence context
  * @list_head: fence list to hold all the fence created on this context
  * @name: name of fence context/timeline
@@ -40,6 +41,7 @@
 	uint32_t drm_id;
 	struct kref kref;
 	spinlock_t lock;
+	spinlock_t list_lock;
 	u64 context;
 	struct list_head fence_list_head;
 	char name[SDE_FENCE_NAME_SIZE];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 82f1c09..a62aa6e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -26,6 +26,7 @@
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 #define   CTL_ROT_TOP                   0x0C0
@@ -78,6 +79,11 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
+static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
 static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
 {
 	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
@@ -537,6 +543,7 @@
 	ops->trigger_flush = sde_hw_ctl_trigger_flush;
 	ops->get_flush_register = sde_hw_ctl_get_flush_register;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->trigger_pending = sde_hw_ctl_trigger_pending;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
 	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 7ae43b7..ace05e8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -91,6 +91,14 @@
 	void (*trigger_start)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * signal that a kickoff is being prepared (hw operation pending)
+	 * for SW controlled interfaces: DSI cmd mode and WB interface
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * kickoff rotator operation for Sw controlled interfaces
 	 * DSI cmd mode and WB interface are SW controlled
 	 * @ctx       : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 93268be..c408861 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -75,12 +75,6 @@
 
 #define TX_MODE_BUFFER_LINE_THRES 2
 
-/* dirty bits for update function */
-#define SDE_PLANE_DIRTY_RECTS	0x1
-#define SDE_PLANE_DIRTY_FORMAT	0x2
-#define SDE_PLANE_DIRTY_SHARPEN	0x4
-#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
-
 #define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
 #define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
 
@@ -107,7 +101,8 @@
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
  * @catalog: Points to sde catalog structure
  * @sbuf_mode: force stream buffer mode if set
- * @sbuf_writeback: fource stream buffer writeback if set
+ * @sbuf_writeback: force stream buffer writeback if set
+ * @revalidate: force revalidation of all the plane properties
  * @blob_rot_caps: Pointer to rotator capability blob
  */
 struct sde_plane {
@@ -134,6 +129,7 @@
 	struct sde_mdss_cfg *catalog;
 	u32 sbuf_mode;
 	u32 sbuf_writeback;
+	bool revalidate;
 
 	struct sde_hw_pixel_ext pixel_ext;
 	bool pixel_ext_usr;
@@ -499,6 +495,17 @@
 			&psde->pipe_qos_cfg);
 }
 
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable)
+{
+	struct sde_plane *psde;
+
+	if (!plane)
+		return;
+
+	psde = to_sde_plane(plane);
+	psde->revalidate = enable;
+}
+
 int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
 {
 	struct sde_plane *psde;
@@ -2250,6 +2257,14 @@
 			state->crtc_w, state->crtc_h,
 			state->crtc_x, state->crtc_y);
 
+	/* force reprogramming of all the parameters, if the flag is set */
+	if (psde->revalidate) {
+		SDE_DEBUG("plane:%d - reconfigure all the parameters\n",
+				plane->base.id);
+		pstate->dirty = SDE_PLANE_DIRTY_ALL;
+		psde->revalidate = false;
+	}
+
 	/* determine what needs to be refreshed */
 	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
 		switch (idx) {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e955f41..ac70542 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -91,6 +91,12 @@
 	int out_xpos;
 };
 
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS	0x1
+#define SDE_PLANE_DIRTY_FORMAT	0x2
+#define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
+
 /**
  * struct sde_plane_state: Define sde extension of drm plane state object
  * @base:	base drm plane state object
@@ -222,4 +228,12 @@
 int sde_plane_color_fill(struct drm_plane *plane,
 		uint32_t color, uint32_t alpha);
 
+/**
+ * sde_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
 #endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b5546ef..585beb9 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -224,9 +224,27 @@
 	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
 }
 
+static void _update_always_on_regs(struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int *const regs = gpudev->reg_offsets->offsets;
+
+	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
+		A6XX_CP_ALWAYS_ON_COUNTER_LO;
+	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
+		A6XX_CP_ALWAYS_ON_COUNTER_HI;
+}
+
 static void a6xx_init(struct adreno_device *adreno_dev)
 {
 	a6xx_crashdump_init(adreno_dev);
+
+	/*
+	 * If the GMU is not enabled, rewrite the offset for the always on
+	 * counters to point to the CP always on instead of GMU always on
+	 */
+	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
+		_update_always_on_regs(adreno_dev);
 }
 
 /**
@@ -2205,10 +2223,6 @@
 				A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
 				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
-	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
-				A6XX_CP_ALWAYS_ON_COUNTER_LO),
-	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
-				A6XX_CP_ALWAYS_ON_COUNTER_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 8f49bc7..d836cbb 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -43,6 +43,7 @@
 #include "kgsl_sync.h"
 #include "kgsl_compat.h"
 #include "kgsl_pool.h"
+#include "adreno.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
@@ -1045,7 +1046,10 @@
 	int result = 0;
 
 	mutex_lock(&device->mutex);
-	device->open_count--;
+
+	if (!adreno_is_a6xx(ADRENO_DEVICE(device)))
+		device->open_count--;
+
 	if (device->open_count == 0) {
 
 		/* Wait for the active count to go to 0 */
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 4314616..b99c1df 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3115,7 +3115,7 @@
 {
 	int sioaddr[2] = { REG_2E, REG_4E };
 	struct it87_sio_data sio_data;
-	unsigned short isa_address;
+	unsigned short isa_address[2];
 	bool found = false;
 	int i, err;
 
@@ -3125,15 +3125,29 @@
 
 	for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
 		memset(&sio_data, 0, sizeof(struct it87_sio_data));
-		isa_address = 0;
-		err = it87_find(sioaddr[i], &isa_address, &sio_data);
-		if (err || isa_address == 0)
+		isa_address[i] = 0;
+		err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+		if (err || isa_address[i] == 0)
 			continue;
+		/*
+		 * Don't register second chip if its ISA address matches
+		 * the first chip's ISA address.
+		 */
+		if (i && isa_address[i] == isa_address[0])
+			break;
 
-		err = it87_device_add(i, isa_address, &sio_data);
+		err = it87_device_add(i, isa_address[i], &sio_data);
 		if (err)
 			goto exit_dev_unregister;
+
 		found = true;
+
+		/*
+		 * IT8705F may respond on both SIO addresses.
+		 * Stop probing after finding one.
+		 */
+		if (sio_data.type == it87)
+			break;
 	}
 
 	if (!found) {
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
index 5857d30..22e9d6f 100644
--- a/drivers/hwtracing/coresight/coresight-hwevent.c
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -216,14 +216,10 @@
 
 	mutex_init(&drvdata->mutex);
 
-	drvdata->clk = devm_clk_get(dev, "core_clk");
+	drvdata->clk = devm_clk_get(dev, "apb_pclk");
 	if (IS_ERR(drvdata->clk))
 		return PTR_ERR(drvdata->clk);
 
-	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
-	if (ret)
-		return ret;
-
 	drvdata->nr_hclk = of_property_count_strings(pdev->dev.of_node,
 						     "qcom,hwevent-clks");
 	drvdata->nr_hreg = of_property_count_strings(pdev->dev.of_node,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 833f10d..475ea75 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -495,7 +495,7 @@
 	}
 }
 
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
 	CS_UNLOCK(drvdata->base);
 
@@ -557,6 +557,207 @@
 	}
 }
 
+static void tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+	get_qdss_bam_connection_info(&bamdata->dest,
+				    &bamdata->dest_pipe_idx,
+				    &bamdata->src_pipe_idx,
+				    &bamdata->desc_fifo,
+				    &bamdata->data_fifo,
+				    NULL);
+}
+
+static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+	uint32_t axictl;
+
+	if (drvdata->enable_to_bam)
+		return;
+
+	/* Configure and enable required CSR registers */
+	msm_qdss_csr_enable_bam_to_usb();
+
+	/* Configure and enable ETR for usb bam output */
+
+	CS_UNLOCK(drvdata->base);
+
+	writel_relaxed(bamdata->data_fifo.size / 4, drvdata->base + TMC_RSZ);
+	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
+	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+	axictl |= (0xF << 8);
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+	axictl &= ~(0x1 << 7);
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+	axictl = (axictl & ~0x3) | 0x2;
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+	writel_relaxed((uint32_t)bamdata->data_fifo.phys_base,
+		       drvdata->base + TMC_DBALO);
+	writel_relaxed((((uint64_t)bamdata->data_fifo.phys_base) >> 32) & 0xFF,
+		       drvdata->base + TMC_DBAHI);
+	/* Set FOnFlIn for periodic flush */
+	writel_relaxed(0x133, drvdata->base + TMC_FFCR);
+	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+	tmc_enable_hw(drvdata);
+
+	CS_LOCK(drvdata->base);
+
+	drvdata->enable_to_bam = true;
+}
+
+static int tmc_etr_bam_enable(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+	int ret;
+
+	if (bamdata->enable)
+		return 0;
+
+	/* Reset bam to start with */
+	ret = sps_device_reset(bamdata->handle);
+	if (ret)
+		goto err0;
+
+	/* Now configure and enable bam */
+
+	bamdata->pipe = sps_alloc_endpoint();
+	if (!bamdata->pipe)
+		return -ENOMEM;
+
+	ret = sps_get_config(bamdata->pipe, &bamdata->connect);
+	if (ret)
+		goto err1;
+
+	bamdata->connect.mode = SPS_MODE_SRC;
+	bamdata->connect.source = bamdata->handle;
+	bamdata->connect.event_thresh = 0x4;
+	bamdata->connect.src_pipe_index = TMC_ETR_BAM_PIPE_INDEX;
+	bamdata->connect.options = SPS_O_AUTO_ENABLE;
+
+	bamdata->connect.destination = bamdata->dest;
+	bamdata->connect.dest_pipe_index = bamdata->dest_pipe_idx;
+	bamdata->connect.desc = bamdata->desc_fifo;
+	bamdata->connect.data = bamdata->data_fifo;
+
+	ret = sps_connect(bamdata->pipe, &bamdata->connect);
+	if (ret)
+		goto err1;
+
+	bamdata->enable = true;
+	return 0;
+err1:
+	sps_free_endpoint(bamdata->pipe);
+err0:
+	return ret;
+}
+
+static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
+{
+	int count;
+
+	/* Ensure no flush is in progress */
+	for (count = TIMEOUT_US;
+	     BVAL(readl_relaxed(drvdata->base + TMC_FFSR), 0) != 0
+	     && count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while waiting for TMC flush, TMC_FFSR: %#x\n",
+	     readl_relaxed(drvdata->base + TMC_FFSR));
+}
+
+void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata)
+{
+	if (!drvdata->enable_to_bam)
+		return;
+
+	/* Ensure periodic flush is disabled in CSR block */
+	msm_qdss_csr_disable_flush();
+
+	CS_UNLOCK(drvdata->base);
+
+	tmc_wait_for_flush(drvdata);
+	tmc_disable_hw(drvdata);
+
+	CS_LOCK(drvdata->base);
+
+	/* Disable CSR configuration */
+	msm_qdss_csr_disable_bam_to_usb();
+	drvdata->enable_to_bam = false;
+}
+
+void tmc_etr_bam_disable(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+	if (!bamdata->enable)
+		return;
+
+	sps_disconnect(bamdata->pipe);
+	sps_free_endpoint(bamdata->pipe);
+	bamdata->enable = false;
+}
+
+void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
+		  struct usb_qdss_ch *ch)
+{
+	struct tmc_drvdata *drvdata = priv;
+	unsigned long flags;
+	int ret = 0;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (event == USB_QDSS_CONNECT) {
+		tmc_etr_fill_usb_bam_data(drvdata);
+		ret = tmc_etr_bam_enable(drvdata);
+		if (ret)
+			dev_err(drvdata->dev, "ETR BAM enable failed\n");
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		__tmc_etr_enable_to_bam(drvdata);
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	} else if (event == USB_QDSS_DISCONNECT) {
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		__tmc_etr_disable_to_bam(drvdata);
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		tmc_etr_bam_disable(drvdata);
+	}
+	mutex_unlock(&drvdata->mem_lock);
+}
+
+int tmc_etr_bam_init(struct amba_device *adev,
+		     struct tmc_drvdata *drvdata)
+{
+	int ret;
+	struct device *dev = &adev->dev;
+	struct resource res;
+	struct tmc_etr_bam_data *bamdata;
+
+	bamdata = devm_kzalloc(dev, sizeof(*bamdata), GFP_KERNEL);
+	if (!bamdata)
+		return -ENOMEM;
+	drvdata->bamdata = bamdata;
+
+	ret = of_address_to_resource(adev->dev.of_node, 1, &res);
+	if (ret)
+		return -ENODEV;
+
+	bamdata->props.phys_addr = res.start;
+	bamdata->props.virt_addr = devm_ioremap(dev, res.start,
+						resource_size(&res));
+	if (!bamdata->props.virt_addr)
+		return -ENOMEM;
+	bamdata->props.virt_size = resource_size(&res);
+
+	bamdata->props.event_threshold = 0x4; /* Pipe event threshold */
+	bamdata->props.summing_threshold = 0x10; /* BAM event threshold */
+	bamdata->props.irq = 0;
+	bamdata->props.num_pipes = TMC_ETR_BAM_NR_PIPES;
+
+	return sps_register_bam_device(&bamdata->props, &bamdata->handle);
+}
+
 static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
 {
 	int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 10e8da4..01dc5e1 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -63,11 +63,13 @@
 
 void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
+	drvdata->enable = true;
 	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 }
 
 void tmc_disable_hw(struct tmc_drvdata *drvdata)
 {
+	drvdata->enable = false;
 	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 }
 
@@ -309,6 +311,100 @@
 }
 static DEVICE_ATTR_RW(mem_size);
 
+static ssize_t out_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			str_tmc_etr_out_mode[drvdata->out_mode]);
+}
+
+static ssize_t out_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[10] = "";
+	unsigned long flags;
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%10s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		__tmc_etr_disable_to_bam(drvdata);
+		tmc_etr_enable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		tmc_etr_bam_disable(drvdata);
+		usb_qdss_close(drvdata->usbch);
+	} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		if (drvdata->reading) {
+			ret = -EBUSY;
+			goto err1;
+		}
+		tmc_etr_disable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		drvdata->usbch = usb_qdss_open("qdss", drvdata,
+					       usb_notifier);
+		if (IS_ERR(drvdata->usbch)) {
+			dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = PTR_ERR(drvdata->usbch);
+			goto err0;
+		}
+	}
+out:
+	mutex_unlock(&drvdata->mem_lock);
+	return size;
+err1:
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+err0:
+	mutex_unlock(&drvdata->mem_lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(out_mode);
+
+static ssize_t available_out_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(str_tmc_etr_out_mode); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+				str_tmc_etr_out_mode[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+static DEVICE_ATTR_RO(available_out_modes);
+
 static ssize_t mem_type_show(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -355,6 +451,8 @@
 	&dev_attr_mem_size.attr,
 	&dev_attr_mem_type.attr,
 	&dev_attr_trigger_cntr.attr,
+	&dev_attr_out_mode.attr,
+	&dev_attr_available_out_modes.attr,
 	NULL,
 };
 
@@ -460,6 +558,10 @@
 		desc.ops = &tmc_etr_cs_ops;
 		desc.groups = coresight_tmc_etr_groups;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+
+		ret = tmc_etr_bam_init(adev, drvdata);
+		if (ret)
+			goto out;
 	} else {
 		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
 		desc.ops = &tmc_etf_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 726dcd6..3d6e823 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -19,7 +19,12 @@
 #define _CORESIGHT_TMC_H
 
 #include <linux/miscdevice.h>
+#include <linux/delay.h>
 #include <asm/cacheflush.h>
+#include <linux/of_address.h>
+#include <linux/amba/bus.h>
+#include <linux/usb_bam.h>
+#include <linux/usb/usb_qdss.h>
 
 #define TMC_RSZ			0x004
 #define TMC_STS			0x00c
@@ -77,6 +82,8 @@
 #define TMC_ETR_SG_NXT_TBL(phys_pte)	(((phys_pte >> PAGE_SHIFT) << 4) | 0x3)
 #define TMC_ETR_SG_LST_ENT(phys_pte)	(((phys_pte >> PAGE_SHIFT) << 4) | 0x1)
 
+#define TMC_ETR_BAM_PIPE_INDEX	0
+#define TMC_ETR_BAM_NR_PIPES	2
 
 enum tmc_config_type {
 	TMC_CONFIG_TYPE_ETB,
@@ -107,6 +114,30 @@
 	[TMC_ETR_MEM_TYPE_SG]		= "sg",
 };
 
+enum tmc_etr_out_mode {
+	TMC_ETR_OUT_MODE_NONE,
+	TMC_ETR_OUT_MODE_MEM,
+	TMC_ETR_OUT_MODE_USB,
+};
+
+static const char * const str_tmc_etr_out_mode[] = {
+	[TMC_ETR_OUT_MODE_NONE]		= "none",
+	[TMC_ETR_OUT_MODE_MEM]		= "mem",
+	[TMC_ETR_OUT_MODE_USB]		= "usb",
+};
+
+struct tmc_etr_bam_data {
+	struct sps_bam_props	props;
+	unsigned long		handle;
+	struct sps_pipe		*pipe;
+	struct sps_connect	connect;
+	uint32_t		src_pipe_idx;
+	unsigned long		dest;
+	uint32_t		dest_pipe_idx;
+	struct sps_mem_buffer	desc_fifo;
+	struct sps_mem_buffer	data_fifo;
+	bool			enable;
+};
 
 /**
  * struct tmc_drvdata - specifics associated to an TMC component
@@ -132,6 +163,7 @@
 	struct miscdevice	miscdev;
 	spinlock_t		spinlock;
 	bool			reading;
+	bool			enable;
 	char			*buf;
 	dma_addr_t		paddr;
 	void __iomem		*vaddr;
@@ -147,6 +179,11 @@
 	enum tmc_etr_mem_type	memtype;
 	u32			delta_bottom;
 	int			sg_blk_num;
+	enum tmc_etr_out_mode	out_mode;
+	struct usb_qdss_ch	*usbch;
+	struct tmc_etr_bam_data	*bamdata;
+	bool			enable_to_bam;
+
 };
 
 /* Generic functions */
@@ -166,5 +203,13 @@
 			     char **bufpp, size_t *len);
 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
+void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
+		  struct usb_qdss_ch *ch);
+int tmc_etr_bam_init(struct amba_device *adev,
+		     struct tmc_drvdata *drvdata);
 extern const struct coresight_ops tmc_etr_cs_ops;
 #endif
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 89ec6d2..be13ebf 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1847,7 +1847,7 @@
 	if (r)
 		goto out;
 
-	param->data_size = sizeof(*param);
+	param->data_size = offsetof(struct dm_ioctl, data);
 	r = fn(param, input_param_size);
 
 	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index 87707b1..e6da6ca 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1,3 +1,9 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o cam_req_mgr_workq.o cam_mem_mgr.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o \
+				cam_req_mgr_util.o \
+				cam_req_mgr_core.o \
+				cam_req_mgr_workq.o \
+				cam_mem_mgr.o \
+				cam_req_mgr_timer.o \
+				cam_req_mgr_debug.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index a34703c..e62c101 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -18,588 +18,1960 @@
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
 #include "cam_req_mgr_workq.h"
-
-/* Forward declarations */
-static int cam_req_mgr_cb_notify_sof(
-	struct cam_req_mgr_sof_notify *sof_data);
-
+#include "cam_req_mgr_debug.h"
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 
-static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
-	.notify_sof = cam_req_mgr_cb_notify_sof,
-	.notify_err = NULL,
-	.add_req = NULL,
-};
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                  i = 0;
+	int                      rc = 0;
+	struct crm_task_payload *task_data = NULL;
+
+	task_data = kcalloc(
+		workq->task.num_task, sizeof(*task_data),
+		GFP_KERNEL);
+	if (!task_data) {
+		rc = -ENOMEM;
+	} else {
+		for (i = 0; i < workq->task.num_task; i++)
+			workq->task.pool[i].payload = &task_data[i];
+	}
+
+	return rc;
+}
 
 /**
- * cam_req_mgr_pvt_find_link()
+ * __cam_req_mgr_print_req_tbl()
  *
- * @brief: Finds link matching with handle within session
- * @session: session indetifier
- * @link_hdl: link handle
+ * @brief : Print request queue and pd table contents for debugging
+ * @req   : request data pointer
  *
- * Returns pointer to link matching handle
+ * @return: 0 for success, negative for failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_find_link(
-	struct cam_req_mgr_core_session *session, int32_t link_hdl)
+static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req)
 {
-	int32_t i;
+	int                           rc = 0;
+	int32_t                       i = 0;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+	struct cam_req_mgr_req_tbl   *req_tbl = req->l_tbl;
+
+	if (!in_q || !req_tbl) {
+		CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+		return -EINVAL;
+	}
+	CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+	mutex_lock(&req->lock);
+	for (i = 0; i < in_q->num_slots; i++) {
+		CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+			in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
+	}
+
+	while (req_tbl != NULL) {
+		for (i = 0; i < req_tbl->num_slots; i++) {
+			CRM_DBG("idx= %d, map= %x, state= %d",
+				req_tbl->slot[i].idx,
+				req_tbl->slot[i].req_ready_map,
+				req_tbl->slot[i].state);
+		}
+		CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+			req_tbl->id, req_tbl->pd, req_tbl->dev_count,
+			req_tbl->dev_mask, req_tbl->skip_traverse,
+			req_tbl->num_slots);
+		req_tbl = req_tbl->next;
+	}
+	mutex_unlock(&req->lock);
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief    : Find pipeline delay based table pointer which matches delay
+ * @tbl      : Pointer to list of request table
+ * @delay    : Pipeline delay value to be searched for comparison
+ *
+ * @return   : pointer to request table for matching pipeline delay table.
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+	struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+	if (!tbl)
+		return NULL;
+
+	do {
+		if (delay != tbl->pd)
+			tbl = tbl->next;
+		else
+			return tbl;
+	} while (tbl != NULL);
+
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief    : Increment val passed by step size and rollover after max_val
+ * @val      : value to be incremented
+ * @step     : amount/step by which val is incremented
+ * @max_val  : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief    : Decrement val passed by step size and rollover after max_val
+ * @val      : value to be decremented
+ * @step     : amount/step by which val is decremented
+ * @max_val  : after zero value will roll over to max val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = *val - step;
+	if (*val < 0)
+		*val = max_val + (*val);
+}
+
+/**
+ * __cam_req_mgr_traverse()
+ *
+ * @brief    : Traverse through pd tables, it will internally cover all linked
+ *             pd tables. Each pd table visited will check if idx passed to its
+ *             in ready state. If ready means all devices linked to the pd table
+ *             have this request id packet ready. Then it calls subsequent pd
+ *             tbl with new idx. New idx value takes into account the delta
+ *             between current pd table and next one.
+ * @traverse_data: contains all the info to traverse through pd tables
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
+{
+	int                          rc = 0;
+	int32_t                      next_idx = traverse_data->idx;
+	int32_t                      curr_idx = traverse_data->idx;
+	struct cam_req_mgr_req_tbl  *tbl;
+	struct cam_req_mgr_apply    *apply_data;
+
+	if (!traverse_data->tbl || !traverse_data->apply_data) {
+		CRM_ERR("NULL pointer %pK %pK",
+			traverse_data->tbl, traverse_data->apply_data);
+		traverse_data->result = 0;
+		return -EINVAL;
+	}
+
+	tbl = traverse_data->tbl;
+	apply_data = traverse_data->apply_data;
+	CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
+		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
+
+	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
+	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
+		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
+		tbl->skip_traverse > 0) {
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		if (rc >= 0) {
+			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+			apply_data[tbl->pd].pd = tbl->pd;
+			apply_data[tbl->pd].req_id =
+				CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
+			apply_data[tbl->pd].idx = curr_idx;
+
+			/* If traverse is successful decrement traverse skip */
+			if (tbl->skip_traverse > 0) {
+				apply_data[tbl->pd].req_id = -1;
+				tbl->skip_traverse--;
+			}
+		} else {
+			/* linked pd table is not ready for this traverse yet */
+			return rc;
+		}
+	} else {
+		/* This pd table is not ready to proceed with asked idx */
+		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_in_q_skip_idx()
+ *
+ * @brief    : Mark the given input queue slot to be skipped during traverse
+ * @in_q     : input queue pointer
+ * @idx      : Sets skip_idx bit of the particular slot to true so when traverse
+ *             happens for this idx, no req will be submitted for devices
+ *             handling this idx.
+ *
+ */
+static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
+	int32_t idx)
+{
+	in_q->slot[idx].req_id = -1;
+	in_q->slot[idx].skip_idx = 1;
+	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+	CRM_DBG("SET IDX SKIP on slot= %d", idx);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_id()
+ *
+ * @brief    : Set unique id to table
+ * @tbl      : pipeline based table which requires new id
+ * @req      : pointer to request data which contains num_tables counter
+ *
+ */
+static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
+	struct cam_req_mgr_req_data *req)
+{
+	if (!tbl)
+		return;
+	do {
+		tbl->id = req->num_tbl++;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_all_skip_cnt()
+ *
+ * @brief    : Each pd table sets skip value based on delta between itself and
+ *             max pd value. During initial streamon or bubble case this is
+ *             used. That way each pd table skips required num of traverse and
+ *             align themselves with req mgr connected devs.
+ * @l_tbl    : iterates through list of pd tables and sets skip traverse
+ *
+ */
+static void __cam_req_mgr_tbl_set_all_skip_cnt(
+	struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl = *l_tbl;
+	int32_t                     max_pd;
+
+	if (!tbl)
+		return;
+
+	max_pd = tbl->pd;
+	do {
+		tbl->skip_traverse = max_pd - tbl->pd;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_reset_req_slot()
+ *
+ * @brief    : reset specified idx/slot in input queue as well as all pd tables
+ * @link     : link pointer
+ * @idx      : slot index which will be reset
+ *
+ */
+static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	struct cam_req_mgr_slot      *slot;
+	struct cam_req_mgr_req_tbl   *tbl = link->req.l_tbl;
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	slot = &in_q->slot[idx];
+	CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if CSL has already pushed new request*/
+	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
+		return;
+
+	/* Reset input queue slot */
+	slot->req_id = -1;
+	slot->skip_idx = 0;
+	slot->recover = 0;
+	slot->status = CRM_SLOT_STATUS_NO_REQ;
+
+	/* Reset all pd table slot */
+	while (tbl != NULL) {
+		CRM_DBG("pd: %d: idx %d state %d",
+			tbl->pd, idx, tbl->slot[idx].state);
+		tbl->slot[idx].req_ready_map = 0;
+		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
+		tbl = tbl->next;
+	}
+}
+
+/**
+ * __cam_req_mgr_check_next_req_slot()
+ *
+ * @brief    : While streaming if input queue does not contain any pending
+ *             request, req mgr still needs to submit pending request ids to
+ *             devices with lower pipeline delay value.
+ * @in_q     : Pointer to input queue which req mgr will peek into
+ *
+ */
+static void __cam_req_mgr_check_next_req_slot(
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int32_t                  idx = in_q->rd_idx;
+	struct cam_req_mgr_slot *slot;
+
+	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
+	slot = &in_q->slot[idx];
+
+	CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if there is new req from CSL, if not complete req */
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		if (in_q->wr_idx != idx)
+			CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+		__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	}
+}
+
+/**
+ * __cam_req_mgr_send_req()
+ *
+ * @brief    : send request id to be applied to each device connected on link
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @in_q     : pointer to input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int                                  rc = 0, pd, i, idx;
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_apply_request     apply_req;
+	struct cam_req_mgr_link_evt_data     evt_data;
+
+	apply_req.link_hdl = link->link_hdl;
+	apply_req.report_if_bubble = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev) {
+			pd = dev->dev_info.p_delay;
+			if (pd >= CAM_PIPELINE_DELAY_MAX) {
+				CRM_WARN("pd %d greater than max",
+					pd);
+				continue;
+			}
+			if (link->req.apply_data[pd].skip_idx ||
+				link->req.apply_data[pd].req_id < 0) {
+				CRM_DBG("skip %d req_id %lld",
+					link->req.apply_data[pd].skip_idx,
+					link->req.apply_data[pd].req_id);
+				continue;
+			}
+			apply_req.dev_hdl = dev->dev_hdl;
+			apply_req.request_id =
+				link->req.apply_data[pd].req_id;
+			idx = link->req.apply_data[pd].idx;
+			apply_req.report_if_bubble =
+				in_q->slot[idx].recover;
+			CRM_DBG("SEND: pd %d req_id %lld",
+				pd, apply_req.request_id);
+			if (dev->ops && dev->ops->apply_req) {
+				rc = dev->ops->apply_req(&apply_req);
+				if (rc < 0)
+					break;
+			}
+		}
+	}
+	if (rc < 0) {
+		CRM_ERR("APPLY FAILED pd %d req_id %lld",
+			dev->dev_info.p_delay, apply_req.request_id);
+		/* Apply req failed notify already applied devs */
+		for (; i >= 0; i--) {
+			dev = &link->l_dev[i];
+			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
+			evt_data.link_hdl =  link->link_hdl;
+			evt_data.req_id = apply_req.request_id;
+			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
+			if (dev->ops && dev->ops->process_evt)
+				dev->ops->process_evt(&evt_data);
+		}
+	}
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_check_link_is_ready()
+ *
+ * @brief    : traverse through all request tables and see if all devices are
+ *             ready to apply request settings.
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @idx      : index within input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	int                            rc;
+	struct cam_req_mgr_traverse    traverse_data;
+	struct cam_req_mgr_req_queue  *in_q;
+	struct cam_req_mgr_apply      *apply_data;
+
+	in_q = link->req.in_q;
+
+	apply_data = link->req.apply_data;
+	memset(apply_data, 0,
+		sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+
+	traverse_data.apply_data = apply_data;
+	traverse_data.idx = idx;
+	traverse_data.tbl = link->req.l_tbl;
+	traverse_data.in_q = in_q;
+	traverse_data.result = 0;
+	/*
+	 *  Traverse through all pd tables, if result is success,
+	 *  apply the settings
+	 */
+
+	rc = __cam_req_mgr_traverse(&traverse_data);
+	CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+		idx, traverse_data.result, link->pd_mask, rc);
+
+	if (!rc && traverse_data.result == link->pd_mask) {
+		CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+			link->link_hdl, idx,
+			apply_data[2].req_id, apply_data[1].req_id,
+			apply_data[0].req_id);
+	} else
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_process_req()
+ *
+ * @brief    : processes read index in request queue and traverse through table
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link)
+{
+	int                                  rc = 0, idx;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_req_queue        *in_q;
+	struct cam_req_mgr_core_session     *session;
+
+	in_q = link->req.in_q;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	/*
+	 * 1. Check if new read index,
+	 * - if in pending  state, traverse again to complete
+	 *    transaction of this read index.
+	 * - if in applied_state, something is wrong.
+	 * - if in no_req state, no new req
+	 */
+	CRM_DBG("idx %d req_status %d",
+		in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+
+	slot = &in_q->slot[in_q->rd_idx];
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		CRM_DBG("No Pending req");
+		return 0;
+	}
+
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
+	if (rc >= 0) {
+		rc = __cam_req_mgr_send_req(link, link->req.in_q);
+		if (rc < 0) {
+			/* Apply req failed retry at next sof */
+			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		} else {
+			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+
+			if (link->state == CAM_CRM_LINK_STATE_ERR) {
+				CRM_WARN("Err recovery done idx %d status %d",
+					in_q->rd_idx,
+					in_q->slot[in_q->rd_idx].status);
+				mutex_lock(&link->lock);
+				link->state = CAM_CRM_LINK_STATE_READY;
+				mutex_unlock(&link->lock);
+			}
+
+			/*
+			 * 2. Check if any new req is pending in input queue,
+			 *    if not finish the lower pipeline delay device with
+			 *    available req ids.
+			 */
+			__cam_req_mgr_check_next_req_slot(in_q);
+
+			/*
+			 * 3. Older req slots can be safely reset as no err ack.
+			 */
+			idx = in_q->rd_idx;
+			__cam_req_mgr_dec_idx(&idx, link->max_delay + 1,
+				in_q->num_slots);
+			__cam_req_mgr_reset_req_slot(link, idx);
+		}
+	} else {
+		/*
+		 * 4.If traverse result is not success, then some devices are
+		 *   not ready with packet for the asked request id,
+		 *   hence try again in next sof
+		 */
+		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		if (link->state == CAM_CRM_LINK_STATE_ERR) {
+			/*
+			 * During error recovery all tables should be ready
+			 *   don't expect to enter here.
+			 * @TODO: gracefully handle if recovery fails.
+			 */
+			CRM_ERR("FATAL recovery cant finish idx %d status %d",
+				in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+			rc = -EPERM;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_add_tbl_to_link()
+ *
+ * @brief    : Add table to list under link sorted by pd decrementing order
+ * @l_tbl    : list of pipeline delay tables.
+ * @new_tbl : new tbl which will be appended to above list as per its pd value
+ *
+ */
+static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
+	struct cam_req_mgr_req_tbl *new_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl;
+
+	if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
+		new_tbl->next = *l_tbl;
+		if (*l_tbl) {
+			new_tbl->pd_delta =
+				new_tbl->pd - (*l_tbl)->pd;
+		}
+		*l_tbl = new_tbl;
+	} else {
+		tbl = *l_tbl;
+
+		/* Reach existing  tbl which has less pd value */
+		while (tbl->next != NULL &&
+			new_tbl->pd < tbl->next->pd) {
+			tbl = tbl->next;
+		}
+		if (tbl->next != NULL) {
+			new_tbl->pd_delta =
+				new_tbl->pd - tbl->next->pd;
+		} else {
+			/* This is last table in linked list*/
+			new_tbl->pd_delta = 0;
+		}
+		new_tbl->next = tbl->next;
+		tbl->next = new_tbl;
+		tbl->pd_delta = tbl->pd - new_tbl->pd;
+	}
+	CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+		new_tbl->pd_delta);
+}
+
+/**
+ * __cam_req_mgr_create_pd_tbl()
+ *
+ * @brief    : Creates new request table for new delay value
+ * @delay    : New pd table allocated will have this delay value
+ *
+ * @return   : pointer to newly allocated table, NULL for failure
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
+{
+	struct cam_req_mgr_req_tbl *tbl =
+		kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
+	if (tbl != NULL) {
+		tbl->num_slots = MAX_REQ_SLOTS;
+		CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+	}
+
+	return tbl;
+}
+
+/**
+ * __cam_req_mgr_destroy_all_tbl()
+ *
+ * @brief   : This func will destroy all pipeline delay based req table structs
+ * @l_tbl    : pointer to first table in list, which has the max pd.
+ *
+ */
+static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl  *tbl = *l_tbl, *temp;
+
+	CRM_DBG("*l_tbl %pK", tbl);
+	while (tbl != NULL) {
+		temp = tbl->next;
+		kfree(tbl);
+		tbl = temp;
+	}
+	*l_tbl = NULL;
+}
+
+/**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief    : Find idx from input queue at which req id is enqueued
+ * @in_q     : input request queue pointer
+ * @req_id   : request id which needs to be searched in input queue
+ *
+ * @return   : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+	int32_t                   idx, i;
+	struct cam_req_mgr_slot  *slot;
+
+	idx = in_q->wr_idx;
+	for (i = 0; i < in_q->num_slots; i++) {
+		slot = &in_q->slot[idx];
+		if (slot->req_id == req_id) {
+			CRM_DBG("req %lld found at %d %d status %d",
+				req_id, idx, slot->idx,
+				slot->status);
+			break;
+		}
+		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+	}
+	if (i >= in_q->num_slots)
+		idx = -1;
+
+	return idx;
+}
+
+/**
+ * __cam_req_mgr_setup_in_q()
+ *
+ * @brief : Initialize req table data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int  __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
+{
+	int                           i;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	in_q->num_slots = MAX_REQ_SLOTS;
+
+	for (i = 0; i < in_q->num_slots; i++) {
+		in_q->slot[i].idx = i;
+		in_q->slot[i].req_id = -1;
+		in_q->slot[i].skip_idx = 0;
+		in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
+	}
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_reset_in_q()
+ *
+ * @brief : Reset input queue slots and read/write indices
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
+{
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
+	in_q->num_slots = 0;
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_sof_freeze()
+ *
+ * @brief : Apoptosis - Handles case when connected devices are not responding
+ * @data  : timer pointer
+ *
+ */
+static void __cam_req_mgr_sof_freeze(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
 	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
+	link = (struct cam_req_mgr_core_link *)timer->parent;
+	CRM_ERR("SOF freeze for link %x", link->link_hdl);
+}
+
+/**
+ * __cam_req_mgr_create_subdevs()
+ *
+ * @brief   : Create new crm subdevs to link with realtime devices
+ * @l_dev   : list of subdevs internal to crm
+ * @num_dev : num of subdevs to be created for link
+ *
+ * @return  : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_create_subdevs(
+	struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
+{
+	int rc = 0;
+	*l_dev = (struct cam_req_mgr_connected_device *)
+		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
+		GFP_KERNEL);
+	if (!*l_dev)
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_subdev()
+ *
+ * @brief    : Cleans up the subdevs allocated by crm for link
+ * @l_device : pointer to list of subdevs crm created
+ *
+ */
+static void __cam_req_mgr_destroy_subdev(
+	struct cam_req_mgr_connected_device *l_device)
+{
+	kfree(l_device);
+	l_device = NULL;
+}
+
+/**
+ * __cam_req_mgr_destroy_link_info()
+ *
+ * @brief    : Cleans up the mem allocated while linking
+ * @link     : pointer to link, mem associated with this link is freed
+ *
+ */
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+{
+	int32_t                                 i = 0;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+
+	mutex_lock(&link->lock);
+
+	link_data.link_enable = 0;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = NULL;
+
+	/* Using device ops unlink devices */
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev != NULL) {
+			if (dev->ops && dev->ops->link_setup)
+				dev->ops->link_setup(&link_data);
+			dev->dev_hdl = 0;
+			dev->parent = NULL;
+			dev->ops = NULL;
+		}
+	}
+	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
+	__cam_req_mgr_reset_in_q(&link->req);
+	link->req.num_tbl = 0;
+	mutex_destroy(&link->req.lock);
+
+	link->pd_mask = 0;
+	link->num_devs = 0;
+	link->max_delay = 0;
+
+	mutex_unlock(&link->lock);
+}
+
+/**
+ * __cam_req_mgr_reserve_link()
+ *
+ * @brief: Reserves one link data struct within session
+ * @session: session identifier
+ *
+ * @return: pointer to link reserved
+ *
+ */
+static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
+	struct cam_req_mgr_core_session *session)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_req_queue *in_q;
+
+	if (!session || !g_crm_core_dev) {
+		CRM_ERR("NULL session/core_dev ptr");
 		return NULL;
 	}
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_hdl == link_hdl) {
-			CRM_DBG("Link found p_delay %d",
-				 link->max_pipeline_delay);
-			spin_unlock(&link->lock);
-			break;
-		}
-		spin_unlock(&link->lock);
+	if (session->num_links >= MAX_LINKS_PER_SESSION) {
+		CRM_ERR("Reached max links %d per session limit %d",
+			session->num_links, MAX_LINKS_PER_SESSION);
+		return NULL;
 	}
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-	spin_unlock(&session->lock);
+
+	link = (struct cam_req_mgr_core_link *)
+		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
+	if (!link) {
+		CRM_ERR("failed to create link, no mem");
+		return NULL;
+	}
+	in_q = &session->in_q;
+	mutex_init(&link->lock);
+
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	link->num_devs = 0;
+	link->max_delay = 0;
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
+	link->req.in_q = in_q;
+	in_q->num_slots = 0;
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	link->parent = (void *)session;
+	mutex_unlock(&link->lock);
+
+	mutex_lock(&session->lock);
+	session->links[session->num_links] = link;
+	session->num_links++;
+	CRM_DBG("Active session links (%d)",
+		session->num_links);
+	mutex_unlock(&session->lock);
 
 	return link;
 }
 
 /**
+ * __cam_req_mgr_unreserve_link()
+ *
+ * @brief  : Removes and frees one link data struct from session
+ * @session: session identifier
+ * @link   : link identifier
+ *
+ */
+static void __cam_req_mgr_unreserve_link(
+	struct cam_req_mgr_core_session *session,
+	struct cam_req_mgr_core_link **link)
+{
+	int32_t   i = 0;
+
+	if (!session || !*link) {
+		CRM_ERR("NULL session/link ptr %pK %pK",
+			session, *link);
+		return;
+	}
+
+	mutex_lock(&session->lock);
+	if (!session->num_links)
+		CRM_WARN("No active link or invalid state %d",
+			session->num_links);
+	else {
+		for (i = 0; i < session->num_links; i++) {
+			if (session->links[i] == *link)
+				session->links[i] = NULL;
+		}
+		session->num_links--;
+		CRM_DBG("Active session links (%d)",
+			session->num_links);
+	}
+	kfree(*link);
+	*link = NULL;
+	mutex_unlock(&session->lock);
+
+}
+
+/* Workqueue context processing section */
+
+/**
+ * cam_req_mgr_process_send_req()
+ *
+ * @brief: This runs in workque thread context. Call core funcs to send
+ *         apply request id to drivers.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_send_req(void *priv, void *data)
+{
+	int                                 rc = 0;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_send_request     *send_req = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	send_req = (struct cam_req_mgr_send_request *)data;
+	in_q = send_req->in_q;
+
+	rc = __cam_req_mgr_send_req(link, in_q);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_flush_req()
+ *
+ * @brief: This runs in workque thread context. Call core funcs to check
+ *         which requests need to be removed/cancelled.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_flush_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx = -1;
+	struct cam_req_mgr_flush_info       *flush_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_flush_request     flush_req;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld type %d",
+		flush_info->link_hdl,
+		flush_info->req_id,
+		flush_info->flush_type);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		for (i = 0; i < in_q->num_slots; i++) {
+			slot = &in_q->slot[i];
+			slot->req_id = -1;
+			slot->skip_idx = 1;
+			slot->status = CRM_SLOT_STATUS_NO_REQ;
+		}
+		in_q->wr_idx = 0;
+		in_q->rd_idx = 0;
+	} else if (flush_info->flush_type ==
+		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			flush_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				flush_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
+				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be cancelled",
+					flush_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		flush_req.link_hdl = flush_info->link_hdl;
+		flush_req.dev_hdl = device->dev_hdl;
+		flush_req.req_id = flush_info->req_id;
+		flush_req.type = flush_info->flush_type;
+		/* @TODO: error return handling from drivers */
+		if (device->ops && device->ops->flush_req)
+			rc = device->ops->flush_req(&flush_req);
+	}
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_sched_req()
+ *
+ * @brief: This runs in workque thread context. Call core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_sched_req(void *priv, void *data)
+{
+	int                               rc = 0;
+	struct cam_req_mgr_sched_request *sched_req = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_req_queue     *in_q = NULL;
+	struct cam_req_mgr_slot          *slot = NULL;
+	struct crm_task_payload          *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld",
+		sched_req->link_hdl,
+		sched_req->req_id);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	slot = &in_q->slot[in_q->wr_idx];
+
+	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+		CRM_ERR("in_q overwrite %d", slot->status);
+		/* @TODO: error handling */
+	}
+	CRM_DBG("sched_req %lld at slot %d",
+		sched_req->req_id, in_q->wr_idx);
+
+	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
+	slot->req_id = sched_req->req_id;
+	slot->skip_idx = 0;
+	slot->recover = sched_req->bubble_enable;
+	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_add_req()
+ *
+ * @brief: This runs in workque thread context. Call core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_add_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx;
+	struct cam_req_mgr_add_request      *add_req = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_req_tbl          *tbl = NULL;
+	struct cam_req_mgr_tbl_slot         *slot = NULL;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	add_req = (struct cam_req_mgr_add_request *)&task_data->u;
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		if (device->dev_hdl == add_req->dev_hdl) {
+			tbl = device->pd_tbl;
+			break;
+		}
+	}
+	if (!tbl) {
+		CRM_ERR("dev_hdl not found %x, %x %x",
+			add_req->dev_hdl,
+			link->l_dev[0].dev_hdl,
+			link->l_dev[1].dev_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	/*
+	 * Go through request table and add
+	 * request id to proper table
+	 * 1. find req slot in in_q matching req_id sent by dev
+	 * 2. goto table of this device based on p_delay
+	 * 3. mark req_ready_map with this dev_bit.
+	 */
+
+	mutex_lock(&link->req.lock);
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -EBADSLT;
+		mutex_unlock(&link->req.lock);
+		goto end;
+	}
+	slot = &tbl->slot[idx];
+	if (slot->state != CRM_REQ_STATE_PENDING &&
+		slot->state != CRM_REQ_STATE_EMPTY) {
+		CRM_WARN("Unexpected state %d for slot %d map %x",
+			slot->state, idx, slot->req_ready_map);
+	}
+
+	slot->state = CRM_REQ_STATE_PENDING;
+	slot->req_ready_map |= (1 << device->dev_bit);
+
+	CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
+		slot->req_ready_map);
+
+	if (slot->req_ready_map == tbl->dev_mask) {
+		CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+			idx, add_req->req_id, tbl->pd);
+		slot->state = CRM_REQ_STATE_READY;
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_error()
+ *
+ * @brief: This runs in workqueue thread context. Handles bubble/error recovery.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_error(void *priv, void *data)
+{
+	int                                  rc = 0, idx = -1, i;
+	struct cam_req_mgr_error_notify     *err_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld error %d",
+		err_info->link_hdl,
+		err_info->req_id,
+		err_info->error);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			err_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				err_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (!slot->recover) {
+				CRM_WARN("err recovery disabled req_id %lld",
+					err_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return 0;
+			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
+			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be recovered %d",
+					err_info->req_id, slot->status);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			/* Notify all devices in the link about error */
+			for (i = 0; i < link->num_devs; i++) {
+				device = &link->l_dev[i];
+				if (device != NULL) {
+					evt_data.dev_hdl = device->dev_hdl;
+					evt_data.evt_type =
+						CAM_REQ_MGR_LINK_EVT_ERR;
+					evt_data.link_hdl =  link->link_hdl;
+					evt_data.req_id = err_info->req_id;
+					evt_data.u.error = err_info->error;
+					if (device->ops &&
+						device->ops->process_evt)
+						rc = device->ops->
+							process_evt(&evt_data);
+				}
+			}
+			/* Bring processing pointer to bubbled req id */
+			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+			in_q->rd_idx = idx;
+			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+			mutex_lock(&link->lock);
+			link->state = CAM_CRM_LINK_STATE_ERR;
+			mutex_unlock(&link->lock);
+		}
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
  * cam_req_mgr_process_sof()
  *
  * @brief: This runs in workque thread context. Call core funcs to check
- * which peding requests can be processed.
- * @data:contains information about frame_id, link etc.
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
  *
- * Returns 0 on success.
+ * @return: 0 on success.
  */
 static int cam_req_mgr_process_sof(void *priv, void *data)
 {
-	int ret = 0, i = 0;
-	struct cam_req_mgr_sof_notify *sof_data = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
-	struct cam_req_mgr_connected_device *device = NULL;
-	struct cam_req_mgr_apply_request apply_req;
+	int                                  rc = 0;
+	struct cam_req_mgr_sof_notify       *sof_data = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
 		CRM_ERR("input args NULL %pK %pK", data, priv);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
-	sof_data = (struct cam_req_mgr_sof_notify *)data;
+	task_data = (struct crm_task_payload *)data;
+	sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
 
 	CRM_DBG("link_hdl %x frame_id %lld",
 		sof_data->link_hdl,
 		sof_data->frame_id);
 
-	apply_req.link_hdl = sof_data->link_hdl;
-	/* @TODO: go through request table and issue
-	 * request id based on dev status
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	/*
+	 * Check if current read index is in applied state, if yes make it free
+	 *    and increment read index to next slot.
 	 */
-	apply_req.request_id = sof_data->frame_id;
-	apply_req.report_if_bubble = 0;
+	CRM_DBG("link_hdl %x curent idx %d req_status %d",
+		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
-	CRM_DBG("link %pK l_dev %pK num_dev %d",
-		link, link->l_devices, link->num_connections);
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		if (device != NULL) {
-			CRM_DBG("dev_id %d dev_hdl %x ops %pK p_delay %d",
-				device->dev_info.dev_id, device->dev_hdl,
-				device->ops, device->dev_info.p_delay);
-			apply_req.dev_hdl = device->dev_hdl;
-			if (device->ops && device->ops->apply_req) {
-				ret = device->ops->apply_req(&apply_req);
-				/* Error handling for this failure is pending */
-				if (ret < 0)
-					CRM_ERR("Failure:%d dev=%d", ret,
-						device->dev_info.dev_id);
-			}
+	if (link->state == CAM_CRM_LINK_STATE_ERR)
+		CRM_WARN("Error recovery idx %d status %d",
+			in_q->rd_idx,
+			in_q->slot[in_q->rd_idx].status);
 
-		}
+	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
+		/*
+		 * Do NOT reset req q slot data here, it can not be done
+		 * here because we need to preserve the data to handle bubble.
+		 */
+		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
 	}
+	rc = __cam_req_mgr_process_req(link);
+	mutex_unlock(&link->req.lock);
 
 end:
-	return ret;
+	return rc;
 }
 
-/**
- * cam_req_mgr_notify_sof()
- *
- * @brief: SOF received from device, sends trigger through workqueue
- * @sof_data: contains information about frame_id, link etc.
- *
- * Returns 0 on success
- */
-static int cam_req_mgr_cb_notify_sof(struct cam_req_mgr_sof_notify *sof_data)
-{
-	int                           ret = 0;
-	struct crm_workq_task        *task = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!sof_data) {
+/* Linked devices' Callback section */
+
+/**
+ * cam_req_mgr_cb_add_req()
+ *
+ * @brief    : Drivers call this function to notify new packet is available.
+ * @add_req  : Information about new request available at a device.
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
+{
+	int                             rc = 0, idx;
+	struct crm_workq_task          *task = NULL;
+	struct cam_req_mgr_core_link   *link = NULL;
+	struct cam_req_mgr_add_request *dev_req;
+	struct crm_task_payload        *task_data;
+
+	if (!add_req) {
 		CRM_ERR("sof_data is NULL");
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 
-	CRM_DBG("link_hdl %x frame_id %lld",
-		sof_data->link_hdl,
-		sof_data->frame_id);
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(add_req->link_hdl);
+
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Validate if req id is present in input queue */
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task dev %x req %lld",
+			add_req->dev_hdl, add_req->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+	dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
+	dev_req->req_id = add_req->req_id;
+	dev_req->link_hdl = add_req->link_hdl;
+	dev_req->dev_hdl = add_req->dev_hdl;
+	task->process_cb = &cam_req_mgr_process_add_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_err()
+ *
+ * @brief    : Error received from device, sends bubble recovery
+ * @err_info : contains information about error occurred like bubble/overflow
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_err(
+	struct cam_req_mgr_error_notify *err_info)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_error_notify *notify_err;
+	struct crm_task_payload         *task_data;
+
+	if (!err_info) {
+		CRM_ERR("err_info is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(err_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	crm_timer_reset(link->watchdog);
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task req_id %lld", err_info->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
+	notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
+	notify_err->req_id = err_info->req_id;
+	notify_err->link_hdl = err_info->link_hdl;
+	notify_err->dev_hdl = err_info->dev_hdl;
+	notify_err->error = err_info->error;
+	task->process_cb = &cam_req_mgr_process_error;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_sof()
+ *
+ * @brief   : SOF received from device, sends trigger through workqueue
+ * @sof_data: contains information about frame_id, link etc.
+ *
+ * @return  : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_sof(
+	struct cam_req_mgr_sof_notify *sof_data)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_sof_notify   *notify_sof;
+	struct crm_task_payload         *task_data;
+
+	if (!sof_data) {
+		CRM_ERR("sof_data is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
 
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sof_data->link_hdl);
 	if (!link) {
 		CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
-
 	}
 
+	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
 		CRM_ERR("no empty task frame %lld", sof_data->frame_id);
-		ret = -EBUSY;
+		rc = -EBUSY;
 		goto end;
 	}
-	task->type = CRM_WORKQ_TASK_NOTIFY_SOF;
-	task->u.notify_sof.frame_id = sof_data->frame_id;
-	task->u.notify_sof.link_hdl = sof_data->link_hdl;
-	task->u.notify_sof.dev_hdl = sof_data->dev_hdl;
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
+	notify_sof = (struct cam_req_mgr_sof_notify *)&task_data->u;
+	notify_sof->frame_id = sof_data->frame_id;
+	notify_sof->link_hdl = sof_data->link_hdl;
+	notify_sof->dev_hdl = sof_data->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_sof;
-	task->priv = link;
-	cam_req_mgr_workq_enqueue_task(task);
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
 end:
-	return ret;
+	return rc;
 }
 
+static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
+	.notify_sof = cam_req_mgr_cb_notify_sof,
+	.notify_err = cam_req_mgr_cb_notify_err,
+	.add_req    = cam_req_mgr_cb_add_req,
+};
+
 /**
- * cam_req_mgr_pvt_reserve_link()
+ * __cam_req_mgr_setup_link_info()
  *
- * @brief: Reserves one link data struct within session
- * @session: session identifier
+ * @brief     : Sets up input queue, create pd based tables, communicate with
+ *              devs connected on this link and setup communication.
+ * @link      : pointer to link to setup
+ * @link_info : link_info coming from CSL to prepare link
  *
- * Returns pointer to link reserved
+ * @return    : 0 on success, negative in case of failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_reserve_link(
-	struct cam_req_mgr_core_session *session)
+static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_link_info *link_info)
 {
-	int32_t i;
-	struct cam_req_mgr_core_link *link;
+	int                                     rc = 0, i = 0;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_req_tbl             *pd_tbl;
+	enum cam_pipeline_delay                 max_delay;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
-		return NULL;
-	}
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
+		return -EPERM;
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_state == CAM_CRM_LINK_STATE_AVAILABLE) {
-			link->num_connections = 0;
-			link->max_pipeline_delay = 0;
-			memset(link->req_table, 0,
-				sizeof(struct cam_req_mgr_request_table));
-			link->link_state = CAM_CRM_LINK_STATE_IDLE;
-			spin_unlock(&link->lock);
-			break;
+	mutex_init(&link->req.lock);
+	CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+	link->req.num_tbl = 0;
+
+	rc = __cam_req_mgr_setup_in_q(&link->req);
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&link->lock);
+	max_delay = CAM_PIPELINE_DELAY_0;
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+		/* Using dev hdl, get ops ptr to communicate with device */
+		dev->ops = (struct cam_req_mgr_kmd_ops *)
+			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (!dev->ops ||
+			!dev->ops->get_dev_info ||
+			!dev->ops->link_setup) {
+			CRM_ERR("FATAL: device ops NULL");
+			rc = -ENXIO;
+			goto error;
 		}
-		spin_unlock(&link->lock);
-	}
-	CRM_DBG("Link available (total %d)", session->num_active_links);
-	spin_unlock(&session->lock);
-
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-
-	return link;
-}
-
-/**
- * cam_req_mgr_pvt_create_subdevs()
- *
- * @brief: Create new crm  subdev to link with realtime devices
- * @l_devices: list of subdevs internal to crm
- * @num_dev: num of subdevs to be created for link
- *
- * Returns pointer to allocated list of devices
- */
-static struct cam_req_mgr_connected_device *
-	cam_req_mgr_pvt_create_subdevs(int32_t num_dev)
-{
-	struct cam_req_mgr_connected_device *l_devices;
-
-	l_devices = (struct cam_req_mgr_connected_device *)
-		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
-		GFP_KERNEL);
-	if (!l_devices)
-		CRM_DBG("Insufficient memory %lu",
-			sizeof(struct cam_req_mgr_connected_device) * num_dev);
-
-	return l_devices;
-}
-
-/**
- * cam_req_mgr_pvt_destroy_subdev()
- *
- * @brief: Cleans up the subdevs allocated by crm for link
- * @l_device: pointer to list of subdevs crm created
- *
- * Returns 0 for success
- */
-static int cam_req_mgr_pvt_destroy_subdev(
-	struct cam_req_mgr_connected_device **l_device)
-{
-	int ret = 0;
-
-	if (!(*l_device))
-		ret = -EINVAL;
-	else {
-		kfree(*l_device);
-		*l_device = NULL;
+		dev->dev_hdl = link_info->dev_hdls[i];
+		dev->parent = (void *)link;
+		dev->dev_info.dev_hdl = dev->dev_hdl;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+		CRM_DBG("%x: connected: %s, id %d, delay %d",
+			link_info->session_hdl, dev->dev_info.name,
+			dev->dev_info.dev_id, dev->dev_info.p_delay);
+		if (rc < 0 ||
+			dev->dev_info.p_delay >=
+			CAM_PIPELINE_DELAY_MAX ||
+			dev->dev_info.p_delay <
+			CAM_PIPELINE_DELAY_0) {
+			CRM_ERR("get device info failed");
+			goto error;
+		} else {
+			CRM_DBG("%x: connected: %s, delay %d",
+				link_info->session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.p_delay);
+			if (dev->dev_info.p_delay >
+				max_delay)
+			max_delay =
+				dev->dev_info.p_delay;
+		}
 	}
 
-	return ret;
+
+	link_data.link_enable = 1;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = &cam_req_mgr_ops;
+	link_data.max_delay = max_delay;
+
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+
+		link_data.dev_hdl = dev->dev_hdl;
+		/*
+		 * For unique pipeline delay table create request
+		 * tracking table
+		 */
+		if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
+			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
+				dev->dev_info.p_delay);
+			if (!pd_tbl) {
+				CRM_ERR("pd %d tbl not found",
+					dev->dev_info.p_delay);
+				rc = -ENXIO;
+				goto error;
+			}
+		} else {
+			pd_tbl = __cam_req_mgr_create_pd_tbl(
+				dev->dev_info.p_delay);
+			if (pd_tbl == NULL) {
+				CRM_ERR("create new pd tbl failed");
+				rc = -ENXIO;
+				goto error;
+			}
+			pd_tbl->pd = dev->dev_info.p_delay;
+			link->pd_mask |= (1 << pd_tbl->pd);
+			/*
+			 * Add table to list and also sort list
+			 * from max pd to lowest
+			 */
+			__cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
+		}
+		dev->dev_bit = pd_tbl->dev_count++;
+		dev->pd_tbl = pd_tbl;
+		pd_tbl->dev_mask |= (1 << dev->dev_bit);
+
+		/* Communicate with dev to establish the link */
+		dev->ops->link_setup(&link_data);
+
+		if (link->max_delay < dev->dev_info.p_delay)
+			link->max_delay = dev->dev_info.p_delay;
+	}
+	link->num_devs = link_info->num_devices;
+
+	/* Assign id for pd tables */
+	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
+
+	/* At start, expect max pd devices, all are in skip state */
+	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+
+	mutex_unlock(&link->lock);
+	return 0;
+
+error:
+	__cam_req_mgr_destroy_link_info(link);
+	return rc;
 }
 
+/* IOCTLs handling section */
 int cam_req_mgr_create_session(
 	struct cam_req_mgr_session_info *ses_info)
 {
-	int ret = 0;
-	int32_t i;
-	int32_t session_hdl;
-	struct cam_req_mgr_core_session *cam_session;
+	int                              rc = 0;
+	int32_t                          session_hdl;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		kzalloc(sizeof(*cam_session), GFP_KERNEL);
 	if (!cam_session) {
-		ret = -ENOMEM;
+		rc = -ENOMEM;
 		goto end;
 	}
 
 	session_hdl = cam_create_session_hdl((void *)cam_session);
 	if (session_hdl < 0) {
 		CRM_ERR("unable to create session_hdl = %x", session_hdl);
-		ret = session_hdl;
-		goto session_hdl_failed;
+		rc = session_hdl;
+		kfree(cam_session);
+		goto end;
 	}
 	ses_info->session_hdl = session_hdl;
+
+	mutex_init(&cam_session->lock);
+	CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+
+	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
-
-	spin_lock_init(&cam_session->lock);
-	cam_session->num_active_links = 0;
-
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		spin_lock_init(&cam_session->links[i].lock);
-		cam_session->links[i].link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-		INIT_LIST_HEAD(&cam_session->links[i].link_head);
-		cam_session->links[i].workq = NULL;
-	}
+	cam_session->num_links = 0;
 	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
-
-	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
-session_hdl_failed:
-	kfree(cam_session);
+	mutex_unlock(&cam_session->lock);
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
+	return rc;
 }
 
 int cam_req_mgr_destroy_session(
 		struct cam_req_mgr_session_info *ses_info)
 {
-	int ret;
-	int32_t i;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link = NULL;
+	int rc;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(ses_info->session_hdl);
-	if (cam_session == NULL) {
+	if (!cam_session) {
 		CRM_ERR("failed to get session priv");
-		ret = -ENOENT;
+		rc = -ENOENT;
 		goto end;
 
 	}
-	spin_lock(&cam_session->lock);
-	for (i = 0; i < cam_session->num_active_links; i++) {
-		link = &cam_session->links[i];
-		CRM_ERR("session %x active_links %d hdl %x connections %d",
+	mutex_lock(&cam_session->lock);
+	if (cam_session->num_links) {
+		CRM_ERR("destroy session %x num_active_links %d",
 			ses_info->session_hdl,
-			cam_session->num_active_links,
-			link->link_hdl, link->num_connections);
+			cam_session->num_links);
+		/* @TODO : Go through active links and destroy ? */
 	}
 	list_del(&cam_session->entry);
-	spin_unlock(&cam_session->lock);
+	mutex_unlock(&cam_session->lock);
+	mutex_destroy(&cam_session->lock);
 	kfree(cam_session);
 
-	ret = cam_destroy_session_hdl(ses_info->session_hdl);
-	if (ret)
-		CRM_ERR("unable to destroy session_hdl = %x ret %d",
-			ses_info->session_hdl, ret);
+	rc = cam_destroy_session_hdl(ses_info->session_hdl);
+	if (rc < 0)
+		CRM_ERR("unable to destroy session_hdl = %x rc %d",
+			ses_info->session_hdl, rc);
 
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
+	return rc;
 }
 
 int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 {
-	int ret = 0;
-	int32_t i, link_hdl;
-	char buf[128];
-	struct cam_create_dev_hdl root_dev;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_core_dev_link_setup link_data;
-	struct cam_req_mgr_connected_device *l_devices;
-	enum cam_pipeline_delay max_delay = CAM_PIPELINE_DELAY_0;
+	int                                     rc = 0;
+	char                                    buf[128];
+	struct cam_create_dev_hdl               root_dev;
+	struct cam_req_mgr_core_session        *cam_session;
+	struct cam_req_mgr_core_link           *link;
 
 	if (!link_info) {
-		CRM_ERR("NULL pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
-
 	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
 		CRM_ERR("Invalid num devices %d", link_info->num_devices);
 		return -EINVAL;
 	}
 
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(link_info->session_hdl);
 	if (!cam_session) {
-		CRM_ERR("NULL session pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_reserve_link(cam_session);
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
 	if (!link) {
-		CRM_ERR("NULL link pointer");
+		CRM_ERR("failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	CRM_DBG("link reserved %pK %x", link, link->link_hdl);
 
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->session_hdl;
 	root_dev.priv = (void *)link;
 
-	link_hdl = cam_create_device_hdl(&root_dev);
-	if (link_hdl < 0) {
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
 		CRM_ERR("Insufficient memory to create new device handle");
-		ret = link_hdl;
+		mutex_unlock(&link->lock);
+		rc = link->link_hdl;
 		goto link_hdl_fail;
 	}
+	mutex_unlock(&link->lock);
+	link_info->link_hdl = link->link_hdl;
 
-	l_devices = cam_req_mgr_pvt_create_subdevs(link_info->num_devices);
-	if (!l_devices) {
-		ret = -ENOMEM;
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->num_devices);
+	if (rc < 0) {
+		CRM_ERR("Insufficient memory to create new crm subdevs");
 		goto create_subdev_failed;
 	}
 
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->get_dev_info) {
-				ret = l_devices[i].ops->get_dev_info(
-					&l_devices[i].dev_info);
-				if (ret < 0 ||
-					l_devices[i].dev_info.p_delay >=
-					CAM_PIPELINE_DELAY_MAX ||
-					l_devices[i].dev_info.p_delay <
-					CAM_PIPELINE_DELAY_0) {
-					CRM_ERR("get device info failed");
-					goto error;
-				} else {
-					CRM_DBG("%x: connected: %s, delay %d",
-						link_info->session_hdl,
-						l_devices[i].dev_info.name,
-						l_devices[i].dev_info.p_delay);
-					if (l_devices[i].dev_info.p_delay >
-						max_delay)
-					max_delay =
-						l_devices[i].dev_info.p_delay;
-				}
-			}
-		} else {
-			CRM_ERR("FATAL: device ops NULL");
-			ret = -ENXIO;
-			goto error;
-		}
-	}
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
 
-	link_data.link_enable = true;
-	link_data.link_hdl = link_hdl;
-	link_data.crm_cb = &cam_req_mgr_ops;
-	link_data.max_delay = max_delay;
-
-	/* After getting info about all devices, establish link */
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->link_setup) {
-				ret = l_devices[i].ops->link_setup(&link_data);
-				if (ret < 0) {
-					/* TODO check handlng of this failure */
-					CRM_ERR("link setup failed");
-					goto error;
-				}
-			}
-		}
-		list_add_tail(&l_devices[i].entry, &link->link_head);
-	}
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	mutex_unlock(&link->lock);
 
 	/* Create worker for current link */
-	snprintf(buf, sizeof(buf), "%x-%x", link_info->session_hdl, link_hdl);
-	ret = cam_req_mgr_workq_create(buf, &link->workq);
-	if (ret < 0) {
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->session_hdl, link->link_hdl);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
-		goto error;
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
 	}
 
-	link_info->link_hdl = link_hdl;
-	spin_lock(&link->lock);
-	link->l_devices = l_devices;
-	link->link_hdl = link_hdl;
-	link->parent = (void *)cam_session;
-	link->num_connections = link_info->num_devices;
-	link->link_state = CAM_CRM_LINK_STATE_READY;
-	spin_unlock(&link->lock);
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links++;
-	spin_unlock(&cam_session->lock);
+	/* Start watchdog timer to detect if camera hw goes into bad state */
+	rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
+		link, &__cam_req_mgr_sof_freeze);
+	if (rc < 0) {
+		kfree(link->workq->task.pool[0].payload);
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	return ret;
-
-error:
-	cam_req_mgr_pvt_destroy_subdev(&l_devices);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
 create_subdev_failed:
-	cam_destroy_device_hdl(link_hdl);
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->link_hdl = 0;
 link_hdl_fail:
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	spin_unlock(&link->lock);
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	mutex_unlock(&link->lock);
 
-	return ret;
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
 }
 
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
 {
-	int ret = 0;
-	int32_t i = 0;
+	int                              rc = 0;
 	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_connected_device *device;
-	struct cam_req_mgr_core_dev_link_setup link_data;
+	struct cam_req_mgr_core_link    *link;
 
 	if (!unlink_info) {
 		CRM_ERR("NULL pointer");
 		return -EINVAL;
 	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
-	cam_get_device_priv(unlink_info->session_hdl);
+		cam_get_device_priv(unlink_info->session_hdl);
 	if (!cam_session) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_find_link(cam_session,
-		unlink_info->link_hdl);
+	/* link hdl's priv data is core_link struct */
+	link = cam_get_device_priv(unlink_info->link_hdl);
 	if (!link) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	__cam_req_mgr_print_req_tbl(&link->req);
 
-	ret = cam_destroy_device_hdl(link->link_hdl);
-	if (ret < 0) {
-		CRM_ERR("error in destroying dev handle %d %x",
-			ret, link->link_hdl);
-		ret = -EINVAL;
-	}
-	link_data.link_enable = false;
-	link_data.link_hdl = link->link_hdl;
-	link_data.crm_cb = NULL;
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		link_data.dev_hdl = device->dev_hdl;
-		if (device->ops && device->ops->link_setup)
-			device->ops->link_setup(&link_data);
-		device->dev_hdl = 0;
-		device->parent = NULL;
-		device->ops = NULL;
-		list_del(&device->entry);
-	}
-	/* Destroy worker of link */
-	cam_req_mgr_workq_destroy(link->workq);
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	link->parent = NULL;
-	link->num_connections = 0;
-	link->link_hdl = 0;
-	link->workq = NULL;
-	spin_unlock(&link->lock);
+	/* Destroy workq payload data */
+	kfree(link->workq->task.pool[0].payload);
+	link->workq->task.pool[0].payload = NULL;
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links--;
-	spin_unlock(&cam_session->lock);
+	/* Destroy workq and timer of link */
+	crm_timer_exit(&link->watchdog);
 
-	ret = cam_req_mgr_pvt_destroy_subdev(&link->l_devices);
-	if (ret < 0) {
-		CRM_ERR("error while destroying subdev link %x",
-			link_data.link_hdl);
-		ret = -EINVAL;
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Clean up request tables */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle */
+	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
+	if (rc < 0) {
+		CRM_ERR("error while destroying dev handle %d %x",
+			rc, link->link_hdl);
 	}
 
-	return ret;
+	/* Free current link and put back into session's free pool of links */
+	__cam_req_mgr_unreserve_link(cam_session, &link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+
+	return rc;
 }
 
 int cam_req_mgr_schedule_request(
 			struct cam_req_mgr_sched_request *sched_req)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_core_session  *session = NULL;
+	struct cam_req_mgr_sched_request *sched;
+	struct crm_task_payload          *task_data;
+
 	if (!sched_req) {
-		CRM_ERR("NULL pointer");
+		CRM_ERR("csl_req is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(sched_req->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+		return -EINVAL;
+	}
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	if (!session) {
+		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task)
+		return -ENOMEM;
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	sched->req_id = sched_req->req_id;
+	sched->link_hdl = sched_req->link_hdl;
+	if (session->force_err_recovery == AUTO_RECOVERY) {
+		sched->bubble_enable = sched_req->bubble_enable;
+	} else {
+		sched->bubble_enable =
+		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
+	}
+	task->process_cb = &cam_req_mgr_process_sched_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	return rc;
 }
 
-int cam_req_mgr_sync_mode(
-			struct cam_req_mgr_sync_mode *sync_links)
+int cam_req_mgr_sync_link(
+	struct cam_req_mgr_sync_mode *sync_links)
 {
 	if (!sync_links) {
 		CRM_ERR("NULL pointer");
@@ -611,15 +1983,70 @@
 }
 
 int cam_req_mgr_flush_requests(
-			struct cam_req_mgr_flush_info *flush_info)
+	struct cam_req_mgr_flush_info *flush_info)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_flush_info    *flush;
+	struct crm_task_payload          *task_data;
+	struct cam_req_mgr_core_session  *session = NULL;
+
 	if (!flush_info) {
-		CRM_ERR("NULL pointer");
-		return -EINVAL;
+		CRM_ERR("flush req is NULL");
+		rc = -EFAULT;
+		goto end;
+	}
+	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
+		CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+		rc = -EINVAL;
+		goto end;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	/* session hdl's priv data is cam session struct */
+	session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(flush_info->session_hdl);
+	if (!session) {
+		CRM_ERR("Invalid session %x", flush_info->session_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (session->num_links <= 0) {
+		CRM_WARN("No active links in session %x",
+		flush_info->session_hdl);
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(flush_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
+	flush = (struct cam_req_mgr_flush_info *)&task_data->u;
+	flush->req_id = flush_info->req_id;
+	flush->link_hdl = flush_info->link_hdl;
+	flush->flush_type = flush_info->flush_type;
+	task->process_cb = &cam_req_mgr_process_flush_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	return rc;
 }
 
 
@@ -639,6 +2066,7 @@
 	CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
 	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
 	mutex_init(&g_crm_core_dev->crm_lock);
+	cam_req_mgr_debug_register(g_crm_core_dev);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 7679f20..889ee9c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -14,213 +14,344 @@
 
 #include "cam_req_mgr_interface.h"
 #include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_timer.h"
 
-#define CAM_REQ_MGR_MAX_LINKED_DEV 16
+#define CAM_REQ_MGR_MAX_LINKED_DEV     16
+#define MAX_REQ_SLOTS                  48
+
+#define CAM_REQ_MGR_WATCHDOG_TIMEOUT   5000
+#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT  1000
+#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
+
+#define FORCE_DISABLE_RECOVERY  2
+#define FORCE_ENABLE_RECOVERY   1
+#define AUTO_RECOVERY           0
+
+#define CRM_WORKQ_NUM_TASKS 30
 
 /**
- * enum crm_req_status
- * State machine for life cycle of request in link
- * EMPTY - indicates req slot is empty
- * PENDING - indicates req slot is waiting for reqs from all devs
- * READY - indicates req slot is ready to be sent to devs
- * APPLIED - indicates req slot is sent to devices
- * INVALID - indicates req slot is not in valid state
+ * enum crm_workq_task_type
+ * @codes: to identify which type of task is present
  */
-enum crm_req_status {
-	CRM_REQ_STATUS_EMPTY,
-	CRM_REQ_STATUS_PENDING,
-	CRM_REQ_STATUS_READY,
-	CRM_REQ_STATUS_APPLIED,
-	CRM_REQ_STATUS_INVALID,
+enum crm_workq_task_type {
+	CRM_WORKQ_TASK_GET_DEV_INFO,
+	CRM_WORKQ_TASK_SETUP_LINK,
+	CRM_WORKQ_TASK_DEV_ADD_REQ,
+	CRM_WORKQ_TASK_APPLY_REQ,
+	CRM_WORKQ_TASK_NOTIFY_SOF,
+	CRM_WORKQ_TASK_NOTIFY_ERR,
+	CRM_WORKQ_TASK_SCHED_REQ,
+	CRM_WORKQ_TASK_FLUSH_REQ,
+	CRM_WORKQ_TASK_INVALID,
+};
+
+/**
+ * struct crm_task_payload
+ * @type       : to identify which type of task is present
+ * @u          : union of payload of all types of tasks supported
+ * @sched_req  : contains info of incoming request from CSL to CRM
+ * @flush_info : contains info of cancelled request
+ * @dev_req    : contains tracking info of available req id at device
+ * @send_req   : contains info of apply settings to be sent to devs in link
+ * @apply_req  : contains info of which request is applied at device
+ * @notify_sof : contains notification from IFE to CRM about SOF trigger
+ * @notify_err : contains error info happened while processing request
+ * -
+ */
+struct crm_task_payload {
+	enum crm_workq_task_type type;
+	union {
+		struct cam_req_mgr_sched_request        sched_req;
+		struct cam_req_mgr_flush_info           flush_info;
+		struct cam_req_mgr_add_request          dev_req;
+		struct cam_req_mgr_send_request         send_req;
+		struct cam_req_mgr_sof_notify           notify_sof;
+		struct cam_req_mgr_error_notify         notify_err;
+	} u;
+};
+
+/**
+ * enum crm_req_state
+ * State machine for life cycle of request in pd table
+ * EMPTY   : indicates req slot is empty
+ * PENDING : indicates req slot is waiting for reqs from all devs
+ * READY   : indicates req slot is ready to be sent to devs
+ * INVALID : indicates req slot is not in valid state
+ */
+enum crm_req_state {
+	CRM_REQ_STATE_EMPTY,
+	CRM_REQ_STATE_PENDING,
+	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_INVALID,
+};
+
+/**
+ * enum crm_slot_status
+ * State machine for life cycle of request in input queue
+ * NO_REQ     : empty slot
+ * REQ_ADDED  : new entry in slot
+ * REQ_PENDING : waiting for requests from all connected devices
+ * REQ_APPLIED : req is sent to devices
+ * INVALID    : invalid state
+ */
+enum crm_slot_status {
+	CRM_SLOT_STATUS_NO_REQ,
+	CRM_SLOT_STATUS_REQ_ADDED,
+	CRM_SLOT_STATUS_REQ_PENDING,
+	CRM_SLOT_STATUS_REQ_APPLIED,
+	CRM_SLOT_STATUS_INVALID,
 };
 
 /**
  * enum cam_req_mgr_link_state
  * State machine for life cycle of link in crm
- * AVAILABLE - indicates link is not in use
- * IDLE - indicates link is reserved but not initialized
- * READY - indicates link is initialized and ready for operation
- * STREAMING - indicates link is receiving triggers and requests
- * BUBBLE_DETECTED - indicates device on link is in bad shape
- * ROLLBACK_STARTED - indicates link had triggered error recovery
- * MAX - indicates link max as invalid
+ * AVAILABLE  : link available
+ * IDLE       : link initialized but not ready yet
+ * READY      : link is ready for use
+ * ERR	      : link has encountered error
+ * MAX        : invalid state
  */
 enum cam_req_mgr_link_state {
 	CAM_CRM_LINK_STATE_AVAILABLE,
 	CAM_CRM_LINK_STATE_IDLE,
 	CAM_CRM_LINK_STATE_READY,
-	CAM_CRM_LINK_STATE_STREAMING,
-	CAM_CRM_LINK_STATE_BUBBLE_DETECTED,
-	CAM_CRM_LINK_STATE_ROLLBACK_STARTED,
-	CAM_CRM_LINK_STATE_DEVICE_STATE_MAX,
+	CAM_CRM_LINK_STATE_ERR,
+	CAM_CRM_LINK_STATE_MAX,
 };
 
 /**
- * struct cam_req_mgr_request_slot
- * @idx: device handle
- * @req_status: state machine for life cycle of a request
- * @request_id: request id value
+ * struct cam_req_mgr_traverse
+ * @idx        : slot index
+ * @result     : contains which all tables were able to apply successfully
+ * @tbl        : pointer of pipeline delay based request table
+ * @apply_data : pointer which various tables will update during traverse
+ * @in_q       : input request queue pointer
  */
-struct cam_req_mgr_request_slot {
+struct cam_req_mgr_traverse {
+	int32_t                       idx;
+	uint32_t                      result;
+	struct cam_req_mgr_req_tbl   *tbl;
+	struct cam_req_mgr_apply     *apply_data;
+	struct cam_req_mgr_req_queue *in_q;
+};
+
+/**
+ * struct cam_req_mgr_apply
+ * @idx      : corresponding input queue slot index
+ * @pd       : pipeline delay of device
+ * @req_id   : req id for dev with above pd to process
+ * @skip_idx: skip applying settings when this is set.
+ */
+struct cam_req_mgr_apply {
 	int32_t idx;
-	enum crm_req_status req_status;
-	int64_t request_id;
+	int32_t pd;
+	int64_t req_id;
+	int32_t skip_idx;
 };
 
 /**
- * struct cam_req_mgr_request_queue
- * @read_index: idx currently being processed
- * @write_index: idx at which incoming req is stored
- * @num_slots: num of req slots i.e. queue depth
- * @req_slot: slots which hold the request info
+ * struct cam_req_mgr_tbl_slot
+ * @idx           : slot index
+ * @req_ready_map : mask tracking which all devices have request ready
+ * @state         : state machine for life cycle of a slot
  */
-struct cam_req_mgr_request_queue {
-	int32_t read_index;
-	int32_t write_index;
-	uint32_t num_slots;
-	struct cam_req_mgr_request_slot *req_slot;
+struct cam_req_mgr_tbl_slot {
+	int32_t             idx;
+	uint32_t            req_ready_map;
+	enum crm_req_state  state;
 };
 
 /**
- * struct cam_req_mgr_frame_settings
- * @request_id: request id to apply
- * @frame_id: frame id for debug purpose
+ * struct cam_req_mgr_req_tbl
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : to indicate how many traverses need to be dropped
+ *              by this table especially in the beginning or bubble recovery
+ * @next          : pointer to next pipeline delay request table
+ * @pd_delta      : difference between this table's pipeline delay and next
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
  */
-struct cam_req_mgr_frame_settings {
-	int64_t request_id;
-	int64_t frame_id;
+struct cam_req_mgr_req_tbl {
+	int32_t                     id;
+	int32_t                     pd;
+	int32_t                     dev_count;
+	int32_t                     dev_mask;
+	int32_t                     skip_traverse;
+	struct cam_req_mgr_req_tbl *next;
+	int32_t                     pd_delta;
+	int32_t                     num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
 };
 
 /**
- * struct cam_req_mgr_request_table
- * @pipeline_delay: pipeline delay of this req table
- * @l_devices: list of devices belonging to this p_delay
- * @dev_mask: each dev hdl has unique bit assigned, dev mask tracks if all devs
- *  received req id packet from UMD to process
+ * struct cam_req_mgr_slot
+ * - Internal Book keeping
+ * @idx      : slot index
+ * @skip_idx : if req id in this slot needs to be skipped/not applied
+ * @status   : state machine for life cycle of a slot
+ * - members updated due to external events
+ * @recover  : if user enabled recovery for this request.
+ * @req_id   : request id of the incoming request stored in this slot
  */
-struct cam_req_mgr_request_table {
-	uint32_t pipeline_delay;
-	struct list_head l_devices;
-	uint32_t dev_mask;
+struct cam_req_mgr_slot {
+	int32_t               idx;
+	int32_t               skip_idx;
+	enum crm_slot_status  status;
+	int32_t               recover;
+	int64_t               req_id;
+};
+
+/**
+ * struct cam_req_mgr_req_queue
+ * @num_slots   : max num of input queue slots
+ * @slot        : request slot holding incoming request id and bubble info.
+ * @rd_idx      : indicates slot index currently in process.
+ * @wr_idx      : indicates slot index to hold new upcoming req.
+ */
+struct cam_req_mgr_req_queue {
+	int32_t                     num_slots;
+	struct cam_req_mgr_slot     slot[MAX_REQ_SLOTS];
+	int32_t                     rd_idx;
+	int32_t                     wr_idx;
+};
+
+/**
+ * struct cam_req_mgr_req_data
+ * @in_q        : Pointer to Input request queue
+ * @l_tbl       : unique pd request tables.
+ * @num_tbl     : how many unique pd value devices are present
+ * @apply_data	: Holds information about request id for a request
+ * @lock        : mutex lock protecting request data ops.
+ */
+struct cam_req_mgr_req_data {
+	struct cam_req_mgr_req_queue *in_q;
+	struct cam_req_mgr_req_tbl   *l_tbl;
+	int32_t                       num_tbl;
+	struct cam_req_mgr_apply      apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct mutex                  lock;
 };
 
 /**
  * struct cam_req_mgr_connected_device
- *- Device Properties
- * @dev_hdl: device handle
- * @dev_bit: unique bit assigned to device in link
- * -Device progress status
- * @available_req_id: tracks latest available req id at this device
- * @processing_req_id: tracks request id currently processed
+ * - Device Properties
+ * @dev_hdl  : device handle
+ * @dev_bit  : unique bit assigned to device in link
  * - Device characteristics
- * @dev_info: holds dev characteristics such as pipeline delay, dev name
- * @ops: holds func pointer to call methods on this device
- * @parent: pvt data - Pointer to parent link device its connected with
- * @entry: entry to the list of connected devices in link
+ * @pd_tbl   : tracks latest available req id at this device
+ * @dev_info : holds dev characteristics such as pipeline delay, dev name
+ * @ops      : holds func pointer to call methods on this device
+ * @parent   : pvt data - like link which this dev hdl belongs to
  */
 struct cam_req_mgr_connected_device {
-	int32_t dev_hdl;
-	int64_t dev_bit;
-	int64_t available_req_id;
-	int64_t processing_req_id;
-	struct cam_req_mgr_device_info dev_info;
-	struct cam_req_mgr_kmd_ops *ops;
-	void *parent;
-	struct list_head entry;
+	int32_t                         dev_hdl;
+	int64_t                         dev_bit;
+	struct cam_req_mgr_req_tbl     *pd_tbl;
+	struct cam_req_mgr_device_info  dev_info;
+	struct cam_req_mgr_kmd_ops     *ops;
+	void                           *parent;
 };
 
 /**
  * struct cam_req_mgr_core_link
- * - Link Properties
- * @link_hdl: Link identifier
- * @num_connections: num of connected devices to this link
- * @max_pipeline_delay: Max of pipeline delay of all connected devs
- * - Input request queue
- * @in_requests: Queue to hold incoming request hints from CSL
- * @workq: Pointer to handle workq related jobs
+ * -  Link Properties
+ * @link_hdl       : Link identifier
+ * @num_devs       : num of connected devices to this link
+ * @max_delay      : Max of pipeline delay of all connected devs
+ * @workq          : Pointer to handle workq related jobs
+ * @pd_mask        : each set bit indicates the device with pd equal to bit
+ *                   position is available.
  * - List of connected devices
- * @l_devices: List of connected devices to this link
- * @fs_list: Holds the request id which each device in link will consume.
- * @req_table: table to keep track of req ids recived at each dev handle
+ * @l_dev          : List of connected devices to this link
+ * - Request handling data struct
+ * @req            : req data holder.
+ * - Timer
+ * @watchdog       : watchdog timer to recover from sof freeze
  * - Link private data
- * @link_state: link state cycle
- * @parent: pvt data - like session info
- * @link_head: List head of connected devices
- * @lock: spin lock to guard link data operations
+ * @workq_comp     : conditional variable to block user thread for workq to
+ *                   finish schedule request processing
+ * @state          : link state machine
+ * @parent         : pvt data - link's parent is session
+ * @lock           : mutex lock to guard link data operations
  */
 struct cam_req_mgr_core_link {
-	int32_t link_hdl;
-	int32_t num_connections;
-	enum cam_pipeline_delay max_pipeline_delay;
-	struct cam_req_mgr_request_queue in_requests;
-	struct cam_req_mgr_core_workq *workq;
-	struct cam_req_mgr_connected_device *l_devices;
-	struct cam_req_mgr_frame_settings fs_list[CAM_REQ_MGR_MAX_LINKED_DEV];
-	struct cam_req_mgr_request_table req_table[CAM_PIPELINE_DELAY_MAX];
-	enum cam_req_mgr_link_state link_state;
-	void *parent;
-	struct list_head link_head;
-	spinlock_t lock;
+	int32_t                              link_hdl;
+	int32_t                              num_devs;
+	enum cam_pipeline_delay              max_delay;
+	struct cam_req_mgr_core_workq       *workq;
+	int32_t                              pd_mask;
+	struct cam_req_mgr_connected_device *l_dev;
+	struct cam_req_mgr_req_data          req;
+	struct cam_req_mgr_timer            *watchdog;
+	struct completion                    workq_comp;
+	enum cam_req_mgr_link_state          state;
+	void                                *parent;
+	struct mutex                         lock;
 };
 
 /**
  * struct cam_req_mgr_core_session
  * - Session Properties
- * @session_hdl: session identifier
- * @num_active_links: num of active links for current session
+ * @session_hdl        : session identifier
+ * @num_links          : num of active links for current session
  * - Links of this session
- * @links: pointer to array of links within session
+ * @links              : pointer to array of links within session
+ * @in_q               : Input request queue one per session
  * - Session private data
- * @entry: pvt data - entry in the list of sessions
- * @lock: pvt data - spin lock to guard session data
+ * @entry              : pvt data - entry in the list of sessions
+ * @lock               : pvt data - spin lock to guard session data
+ * - Debug data
+ * @force_err_recovery : For debugging, we can force bubble recovery
+ *                       to be always ON or always OFF using debugfs.
  */
 struct cam_req_mgr_core_session {
-	int32_t session_hdl;
-	uint32_t num_active_links;
-	struct cam_req_mgr_core_link links[MAX_LINKS_PER_SESSION];
-	struct list_head entry;
-	spinlock_t lock;
+	int32_t                       session_hdl;
+	uint32_t                      num_links;
+	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
+	struct cam_req_mgr_req_queue  in_q;
+	struct list_head              entry;
+	struct mutex                  lock;
+	int32_t                       force_err_recovery;
 };
 
 /**
  * struct cam_req_mgr_core_device
  * - Core camera request manager data struct
- * @session_head: list head holding sessions
- * @crm_lock: mutex lock to protect session creation & destruction
+ * @session_head : list head holding sessions
+ * @crm_lock     : mutex lock to protect session creation & destruction
  */
 struct cam_req_mgr_core_device {
-	struct list_head session_head;
-	struct mutex crm_lock;
+	struct list_head             session_head;
+	struct mutex                 crm_lock;
 };
 
-/* cam_req_mgr_dev to cam_req_mgr_core internal functions */
 /**
  * cam_req_mgr_create_session()
- * @brief: creates session
- * @ses_info: output param for session handle
+ * @brief    : creates session
+ * @ses_info : output param for session handle
  *
- * Called as part of session creation.
+ * called as part of session creation.
  */
-int cam_req_mgr_create_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_destroy_session()
- * @brief: destroy session
- * @ses_info: session handle info, input param
+ * @brief    : destroy session
+ * @ses_info : session handle info, input param
  *
  * Called as part of session destroy
  * return success/failure
  */
-int cam_req_mgr_destroy_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_link()
- * @brief: creates a link for a session
- * @link_info: handle and session info to create a link
+ * @brief     : creates a link for a session
+ * @link_info : handle and session info to create a link
  *
- * Link is formed in a session for multiple devices. It creates
+ * link is formed in a session for multiple devices. it creates
  * a unique link handle for the link and is specific to a
  * session. Returns link handle
  */
@@ -228,10 +359,10 @@
 
 /**
  * cam_req_mgr_unlink()
- * @brief: destroy a link in a session
- * @unlink_info: session and link handle info
+ * @brief       : destroy a link in a session
+ * @unlink_info : session and link handle info
  *
- * Link is destroyed in a session
+ * link is destroyed in a session
  */
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info);
 
@@ -244,11 +375,11 @@
 	struct cam_req_mgr_sched_request *sched_req);
 
 /**
- * cam_req_mgr_sync_mode()
+ * cam_req_mgr_sync_link()
  * @brief: sync for links in a session
  * @sync_links: session, links info and master link info
  */
-int cam_req_mgr_sync_mode(struct cam_req_mgr_sync_mode *sync_links);
+int cam_req_mgr_sync_link(struct cam_req_mgr_sync_mode *sync_links);
 
 /**
  * cam_req_mgr_flush_requests()
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index cf2fe7f..2a831e8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -15,6 +15,16 @@
 #define CRM_TRACE_ENABLE 0
 #define CRM_DEBUG_MUTEX 0
 
+#define SET_SUCCESS_BIT(ret, pd)	{\
+	(ret) |= (1 << (pd));	\
+	}
+
+#define SET_FAILURE_BIT(ret, pd)	{\
+	(ret) &= ~(1 << (pd));	\
+	}
+
+#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
+
 #if (CRM_TRACE_ENABLE == 1)
 	#define CRM_DBG(fmt, args...) do { \
 	trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
new file mode 100644
index 0000000..19833d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_debug.h"
+
+#define MAX_SESS_INFO_LINE_BUFF_LEN 256
+
+static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+
+static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
+{
+	struct cam_req_mgr_core_device  *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+	int rc = 0;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			session->force_err_recovery = val;
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return rc;
+}
+
+static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val)
+{
+	struct cam_req_mgr_core_device *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		session = list_first_entry(&core_dev->session_head,
+			struct cam_req_mgr_core_session,
+			entry);
+		*val = session->force_err_recovery;
+	}
+	mutex_unlock(&core_dev->crm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
+	cam_req_mgr_debug_set_bubble_recovery, "%lld\n");
+
+static int session_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t session_info_read(struct file *t_file, char *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = sess_info_buffer;
+	char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
+	struct cam_req_mgr_core_device *core_dev =
+		(struct cam_req_mgr_core_device *) t_file->private_data;
+	struct cam_req_mgr_core_session *session;
+
+	memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			snprintf(line_buffer, sizeof(line_buffer),
+				"session_hdl = %x \t"
+				"num_links = %d\n",
+				session->session_hdl, session->num_links);
+			strlcat(out_buffer, line_buffer,
+				sizeof(sess_info_buffer));
+			for (i = 0; i < session->num_links; i++) {
+				snprintf(line_buffer, sizeof(line_buffer),
+					"link_hdl[%d] = 0x%x, num_devs connected = %d\n",
+					i, session->links[i]->link_hdl,
+					session->links[i]->num_devs);
+				strlcat(out_buffer, line_buffer,
+					sizeof(sess_info_buffer));
+			}
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t session_info_write(struct file *t_file,
+	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	return 0;
+}
+
+static const struct file_operations session_info = {
+	.open = session_info_open,
+	.read = session_info_read,
+	.write = session_info_write,
+};
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
+{
+	struct dentry *debugfs_root;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "cam_req_mgr");
+	debugfs_root = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("sessions_info", 0644,
+		debugfs_root, core_dev, &session_info))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bubble_recovery", 0644,
+		debugfs_root, core_dev, &bubble_recovery))
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
new file mode 100644
index 0000000..82ac764
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_DEBUG_H_
+#define _CAM_REQ_MGR_DEBUG_H_
+
+#include <linux/debugfs.h>
+#include "cam_req_mgr_core.h"
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 43b020c6..13affe9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -326,7 +326,7 @@
 			return -EFAULT;
 		}
 
-		rc = cam_req_mgr_sync_mode(&sync_mode);
+		rc = cam_req_mgr_sync_link(&sync_mode);
 		}
 		break;
 	case CAM_REQ_MGR_ALLOC_BUF: {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 174a725..91860f6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -18,15 +18,14 @@
 #include "cam_req_mgr_core_defs.h"
 #include "cam_req_mgr_util.h"
 
-/* Forward declarations */
 struct cam_req_mgr_sof_notify;
 struct cam_req_mgr_error_notify;
 struct cam_req_mgr_add_request;
 struct cam_req_mgr_device_info;
 struct cam_req_mgr_core_dev_link_setup;
 struct cam_req_mgr_apply_request;
-
-/*Ops table for req mgr - kmd communication */
+struct cam_req_mgr_flush_request;
+struct cam_req_mgr_link_evt_data;
 
 /* Request Manager -- camera device driver interface */
 /**
@@ -44,21 +43,25 @@
  * @brief: cam req mgr to camera device drivers
  *
  * @cam_req_mgr_get_dev_info: to fetch details about device linked
- * @cam_req_mgr_link_setup: to establish link with device for a session
- * @cam_req_mgr_notify_err: to broadcast error happened on link for request id
- * @cam_req_mgr_apply_req: CRM asks device to apply certain request id.
+ * @cam_req_mgr_link_setup  : to establish link with device for a session
+ * @cam_req_mgr_notify_err  : to broadcast error happened on link for request id
+ * @cam_req_mgr_apply_req   : CRM asks device to apply certain request id.
+ * @cam_req_mgr_flush_req   : Flush or cancel request
+ * @cam_req_mgr_process_evt : generic events
  */
 typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
 typedef int (*cam_req_mgr_link_setup)(
 	struct cam_req_mgr_core_dev_link_setup *);
 typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *);
+typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
+typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
 
 /**
- * @brief: cam_req_mgr_crm_cb - func table
+ * @brief      : cam_req_mgr_crm_cb - func table
  *
- * @notify_sof: payload for sof indication event
- * @notify_err: payload for different error occurred at device
- * @add_req: pauload to inform which device and what request is received
+ * @notify_sof : payload for sof indication event
+ * @notify_err : payload for different error occurred at device
+ * @add_req    : payload to inform which device and what request is received
  */
 struct cam_req_mgr_crm_cb {
 	cam_req_mgr_notify_sof  notify_sof;
@@ -67,26 +70,30 @@
 };
 
 /**
- * @brief: cam_req_mgr_kmd_ops - func table
+ * @brief        : cam_req_mgr_kmd_ops - func table
  *
- * @get_dev_info: payload to fetch device details
- * @link_setup: payload to establish link with device
- * @apply_req: payload to apply request id on a device linked
+ * @get_dev_info : payload to fetch device details
+ * @link_setup   : payload to establish link with device
+ * @apply_req    : payload to apply request id on a device linked
+ * @flush_req    : payload to flush request
+ * @process_evt  : payload to generic event
  */
 struct cam_req_mgr_kmd_ops {
 	cam_req_mgr_get_dev_info      get_dev_info;
 	cam_req_mgr_link_setup        link_setup;
 	cam_req_mgr_apply_req         apply_req;
+	cam_req_mgr_flush_req        flush_req;
+	cam_req_mgr_process_evt      process_evt;
 };
 
 /**
  * enum cam_pipeline_delay
- * @brief: enumerator for different pipeline delays in camera
+ * @brief     : enumerator for different pipeline delays in camera
  *
- * @DELAY_0: device processed settings on same frame
- * @DELAY_1: device processed settings after 1 frame
- * @DELAY_2: device processed settings after 2 frames
- * @DELAY_MAX: maximum supported pipeline delay
+ * @DELAY_0   : device processed settings on same frame
+ * @DELAY_1   : device processed settings after 1 frame
+ * @DELAY_2   : device processed settings after 2 frames
+ * @DELAY_MAX : maximum supported pipeline delay
  */
 enum cam_pipeline_delay {
 	CAM_PIPELINE_DELAY_0,
@@ -97,11 +104,11 @@
 
 /**
  * enum cam_req_status
- * @brief: enumerator for request status
+ * @brief   : enumerator for request status
  *
- * @SUCCESS: device processed settings successfully
- * @FAILED: device processed settings failed
- * @MAX: invalid status value
+ * @SUCCESS : device processed settings successfully
+ * @FAILED  : device processed settings failed
+ * @MAX     : invalid status value
  */
 enum cam_req_status {
 	CAM_REQ_STATUS_SUCCESS,
@@ -111,15 +118,15 @@
 
 /**
  * enum cam_req_mgr_device_error
- * @brief: enumerator for different errors occurred at device
+ * @brief      : enumerator for different errors occurred at device
  *
- * @NOT_FOUND: settings asked by request manager is not found
- * @BUBBLE: device hit timing issue and is able to recover
- * @FATAL: device is in bad shape and can not recover from error
- * @PAGE_FAULT: Page fault while accessing memory
- * @OVERFLOW: Bus Overflow for IFE/VFE
- * @TIMEOUT: Timeout from cci or bus.
- * @MAX: Invalid error value
+ * @NOT_FOUND  : settings asked by request manager is not found
+ * @BUBBLE     : device hit timing issue and is able to recover
+ * @FATAL      : device is in bad shape and can not recover from error
+ * @PAGE_FAULT : Page fault while accessing memory
+ * @OVERFLOW   : Bus Overflow for IFE/VFE
+ * @TIMEOUT    : Timeout from cci or bus.
+ * @MAX        : Invalid error value
  */
 enum cam_req_mgr_device_error {
 	CRM_KMD_ERR_NOT_FOUND,
@@ -133,17 +140,17 @@
 
 /**
  * enum cam_req_mgr_device_id
- * @brief: enumerator for different devices in subsystem
+ * @brief       : enumerator for different devices in subsystem
  *
- * @CAM_REQ_MGR: request manager itself
- * @SENSOR: sensor device
- * @FLASH: LED flash or dual LED device
- * @ACTUATOR: lens mover
- * @IFE: Image processing device
- * @EXTERNAL_1: third party device
- * @EXTERNAL_2: third party device
- * @EXTERNAL_3: third party device
- * @MAX: invalid device id
+ * @CAM_REQ_MGR : request manager itself
+ * @SENSOR      : sensor device
+ * @FLASH       : LED flash or dual LED device
+ * @ACTUATOR    : lens mover
+ * @IFE         : Image processing device
+ * @EXTERNAL_1  : third party device
+ * @EXTERNAL_2  : third party device
+ * @EXTERNAL_3  : third party device
+ * @MAX         : invalid device id
  */
 enum cam_req_mgr_device_id {
 	CAM_REQ_MGR_DEVICE,
@@ -158,11 +165,22 @@
 };
 
 /* Camera device driver to Req Mgr device interface */
+
+/**
+ * enum cam_req_mgr_link_evt_type
+ * @CAM_REQ_MGR_LINK_EVT_ERR: device reported an error on the link
+ * @CAM_REQ_MGR_LINK_EVT_MAX: invalid/maximum link event type
+ */
+enum cam_req_mgr_link_evt_type {
+	CAM_REQ_MGR_LINK_EVT_ERR,
+	CAM_REQ_MGR_LINK_EVT_MAX,
+};
+
 /**
  * struct cam_req_mgr_sof_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @frame_id: frame id for internal tracking
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @frame_id : frame id for internal tracking
  */
 struct cam_req_mgr_sof_notify {
 	int32_t link_hdl;
@@ -172,11 +190,10 @@
 
 /**
  * struct cam_req_mgr_error_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which hit error
- * @error: what error device hit while processing this req
- *
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which hit error
+ * @error    : what error device hit while processing this req
  */
 struct cam_req_mgr_error_notify {
 	int32_t link_hdl;
@@ -187,9 +204,9 @@
 
 /**
  * struct cam_req_mgr_add_request
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which device is ready to process
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which device is ready to process
  *
  */
 struct cam_req_mgr_add_request {
@@ -202,48 +219,91 @@
 /* CRM to KMD devices */
 /**
  * struct cam_req_mgr_device_info
- * @dev_hdl: Input_param : device handle for reference
- * @name: link link or unlink
- * @dev_id: device id info
- * @p_delay: delay between time settings applied and take effect
+ * @dev_hdl : Input_param : device handle for reference
+ * @name    : name of the device
+ * @dev_id  : device id info
+ * @p_delay : delay between time settings applied and take effect
  *
  */
 struct cam_req_mgr_device_info {
-	int32_t dev_hdl;
-	char name[256];
-	enum cam_req_mgr_device_id dev_id;
-	enum cam_pipeline_delay p_delay;
+	int32_t                     dev_hdl;
+	char                        name[256];
+	enum cam_req_mgr_device_id  dev_id;
+	enum cam_pipeline_delay     p_delay;
 };
 
 /**
  * struct cam_req_mgr_core_dev_link_setup
- * @link_enable: link link or unlink
- * @link_hdl: link identifier
- * @dev_hdl: device handle for reference
- * @max_delay: max pipeline delay on this link
- * @crm_cb: callback funcs to communicate with req mgr
+ * @link_enable : link link or unlink
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for reference
+ * @max_delay   : max pipeline delay on this link
+ * @crm_cb      : callback funcs to communicate with req mgr
  *
  */
 struct cam_req_mgr_core_dev_link_setup {
-	bool link_enable;
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	enum cam_pipeline_delay max_delay;
+	int32_t                    link_enable;
+	int32_t                    link_hdl;
+	int32_t                    dev_hdl;
+	enum cam_pipeline_delay    max_delay;
 	struct cam_req_mgr_crm_cb *crm_cb;
 };
 
 /**
  * struct cam_req_mgr_apply_request
- * @link_id: link identifier
- * @dev_hdl: device handle for cross check
- * @request_id: request id settings to apply
- * @report_if_bubble: report to crm if failure in applying
+ * @link_hdl         : link identifier
+ * @dev_hdl          : device handle for cross check
+ * @request_id       : request id settings to apply
+ * @report_if_bubble : report to crm if failure in applying
  *
  */
 struct cam_req_mgr_apply_request {
+	int32_t    link_hdl;
+	int32_t    dev_hdl;
+	int64_t    request_id;
+	int32_t    report_if_bubble;
+};
+
+/**
+ * struct cam_req_mgr_flush_request
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for cross check
+ * @type        : cancel request type: flush all or a specific request
+ * @req_id      : request id to cancel
+ *
+ */
+struct cam_req_mgr_flush_request {
+	int32_t     link_hdl;
+	int32_t     dev_hdl;
+	uint32_t    type;
+	int64_t     req_id;
+};
+
+/**
+ * struct cam_req_mgr_link_evt_data
+ * @link_hdl : link handle
+ * @req_id   : request id
+ * @evt_type : type of link event
+ */
+struct cam_req_mgr_link_evt_data {
 	int32_t link_hdl;
 	int32_t dev_hdl;
-	int64_t request_id;
-	int32_t report_if_bubble;
+	int64_t req_id;
+
+	enum cam_req_mgr_link_evt_type evt_type;
+	union {
+		enum cam_req_mgr_device_error error;
+	} u;
+};
+
+/**
+ * struct cam_req_mgr_send_request
+ * @link_hdl   : link identifier
+ * @in_q       : pointer to request queue
+ *
+ */
+struct cam_req_mgr_send_request {
+	int32_t    link_hdl;
+	struct cam_req_mgr_req_queue *in_q;
 };
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
new file mode 100644
index 0000000..9da445d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_timer.h"
+
+void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
+{
+	if (!crm_timer)
+		return;
+	CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+		crm_timer->expires, jiffies);
+	mod_timer(&crm_timer->sys_timer,
+		(jiffies + msecs_to_jiffies(crm_timer->expires)));
+}
+
+void crm_timer_callback(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
+	CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+	crm_timer_reset(timer);
+}
+
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires)
+{
+	CRM_DBG("new time %d", expires);
+	if (crm_timer) {
+		crm_timer->expires = expires;
+		crm_timer_reset(crm_timer);
+	}
+}
+
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long))
+{
+	int                       ret = 0;
+	struct cam_req_mgr_timer *crm_timer = NULL;
+
+	CRM_DBG("init timer %d %pK", expires, *timer);
+	if (*timer == NULL) {
+		crm_timer = (struct cam_req_mgr_timer *)
+			kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
+		if (!crm_timer) {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		if (timer_cb != NULL)
+			crm_timer->timer_cb = timer_cb;
+		else
+			crm_timer->timer_cb = crm_timer_callback;
+
+		crm_timer->expires = expires;
+		crm_timer->parent = parent;
+		setup_timer(&crm_timer->sys_timer,
+			crm_timer->timer_cb, (unsigned long)crm_timer);
+		crm_timer_reset(crm_timer);
+		*timer = crm_timer;
+	} else {
+		CRM_WARN("Timer already exists!!");
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
+{
+	CRM_DBG("destroy timer %pK", *crm_timer);
+	if (*crm_timer) {
+		del_timer(&(*crm_timer)->sys_timer);
+		kfree(*crm_timer);
+		*crm_timer = NULL;
+	}
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
new file mode 100644
index 0000000..4d600ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_TIMER_H_
+#define _CAM_REQ_MGR_TIMER_H_
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core_defs.h"
+
+/** struct cam_req_mgr_timer
+ * @expires   : timeout value for timer
+ * @sys_timer : system timer variable
+ * @parent    : priv data - link pointer
+ * @timer_cb  : callback func which will be called when timeout expires
+ */
+struct cam_req_mgr_timer {
+	int32_t             expires;
+	struct timer_list   sys_timer;
+	void               *parent;
+	void              (*timer_cb)(unsigned long data);
+};
+
+/**
+ * crm_timer_modify()
+ * @brief     : allows user to modify expiry time.
+ * @crm_timer : timer which will be reset to new expires value
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires);
+
+/**
+ * crm_timer_reset()
+ * @brief : re-arms the timer to fire after its configured expiry.
+ * @timer : timer which will be reset to expires values
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *timer);
+
+/**
+ * crm_timer_init()
+ * @brief    : create a new general purpose timer.
+ *             timer utility takes care of allocating memory and deleting
+ * @timer    : double pointer to new timer allocated
+ * @expires  : Timeout value to fire callback
+ * @parent   : void pointer which caller can use for book keeping
+ * @timer_cb : caller can choose to use its own callback function when
+ *             timer fires the timeout. If no value is set timer util
+ *             will use default.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long));
+
+/**
+ * crm_timer_exit()
+ * @brief : destroys the timer allocated.
+ * @timer : timer pointer which will be freed
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **timer);
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 1f6a97a..f53e41c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,7 +12,7 @@
 
 #include "cam_req_mgr_workq.h"
 
-/* workqueue's task manager methods */
+
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
@@ -21,7 +21,7 @@
 	if (!workq)
 		return NULL;
 
-	spin_lock(&workq->task.lock);
+	spin_lock_bh(&workq->lock_bh);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -31,9 +31,9 @@
 		atomic_sub(1, &workq->task.free_cnt);
 		list_del_init(&task->entry);
 	}
-end:
-	spin_unlock(&workq->task.lock);
 
+end:
+	spin_unlock_bh(&workq->lock_bh);
 	return task;
 }
 
@@ -42,17 +42,20 @@
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
 
+	spin_lock_bh(&workq->lock_bh);
+	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
 	task->priv = NULL;
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
+	spin_unlock_bh(&workq->lock_bh);
 }
 
 /**
  * cam_req_mgr_process_task() - Process the enqueued task
- * @task: pointer to task worker thread shall process
+ * @task: pointer to task workq thread shall process
  */
 static int cam_req_mgr_process_task(struct crm_workq_task *task)
 {
@@ -62,31 +65,10 @@
 		return -EINVAL;
 
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
-
-	switch (task->type) {
-	case CRM_WORKQ_TASK_SCHED_REQ:
-	case CRM_WORKQ_TASK_DEV_ADD_REQ:
-	case CRM_WORKQ_TASK_NOTIFY_SOF:
-	case CRM_WORKQ_TASK_NOTIFY_ACK:
-	case CRM_WORKQ_TASK_NOTIFY_ERR:
-		if (task->process_cb)
-			task->process_cb(task->priv, &task->u);
-		else
-			CRM_WARN("FATAL:no task handler registered for workq!");
-		break;
-	case CRM_WORKQ_TASK_GET_DEV_INFO:
-	case CRM_WORKQ_TASK_SETUP_LINK:
-	case CRM_WORKQ_TASK_APPLY_REQ:
-		/* These tasks are not expected to be queued to
-		 * workque at the present
-		 */
-		CRM_DBG("Not supported");
-		break;
-	case CRM_WORKQ_TASK_INVALID:
-	default:
-		CRM_ERR("Invalid task type %x", task->type);
-		break;
-	}
+	if (task->process_cb)
+		task->process_cb(task->priv, task->payload);
+	else
+		CRM_WARN("FATAL:no task handler registered for workq");
 	cam_req_mgr_workq_put_task(task);
 
 	return 0;
@@ -99,8 +81,8 @@
 static void cam_req_mgr_process_workq(struct work_struct *w)
 {
 	struct cam_req_mgr_core_workq *workq = NULL;
-	struct crm_workq_task *task, *task_save;
-
+	struct crm_workq_task         *task, *task_save;
+	int32_t                        i = CRM_TASK_PRIORITY_0;
 	if (!w) {
 		CRM_ERR("NULL task pointer can not schedule");
 		return;
@@ -108,19 +90,44 @@
 	workq = (struct cam_req_mgr_core_workq *)
 		container_of(w, struct cam_req_mgr_core_workq, work);
 
-	list_for_each_entry_safe(task, task_save,
-		&workq->task.process_head, entry) {
-		atomic_sub(1, &workq->task.pending_cnt);
-		spin_lock(&workq->task.lock);
-		list_del_init(&task->entry);
-		spin_unlock(&workq->task.lock);
-		cam_req_mgr_process_task(task);
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				atomic_sub(1, &workq->task.pending_cnt);
+				cam_req_mgr_process_task(task);
+			}
+			CRM_DBG("processed task %pK free_cnt %d",
+				task, atomic_read(&workq->task.free_cnt));
+		}
+		i++;
 	}
-	CRM_DBG("processed task %p free_cnt %d",
-		task, atomic_read(&workq->task.free_cnt));
 }
 
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task)
+void crm_workq_clear_q(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                 i = CRM_TASK_PRIORITY_0;
+	struct crm_workq_task  *task, *task_save;
+
+	CRM_DBG("pending_cnt %d",
+		atomic_read(&workq->task.pending_cnt));
+
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				cam_req_mgr_workq_put_task(task);
+				CRM_WARN("flush task %pK, %d, cnt %d",
+					task, i, atomic_read(
+					&workq->task.free_cnt));
+			}
+		}
+		i++;
+	}
+}
+
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio)
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
@@ -132,31 +139,33 @@
 	}
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
 	if (!workq) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
+		CRM_DBG("NULL workq pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 	if (!workq->job) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 
+	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock(&workq->task.lock);
+		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
-	spin_lock(&workq->task.lock);
+	task->priv = priv;
+	task->priority =
+		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
+		? prio : CRM_TASK_PRIORITY_0;
 	list_add_tail(&task->entry,
-		&workq->task.process_head);
-	spin_unlock(&workq->task.lock);
+		&workq->task.process_head[task->priority]);
 	atomic_add(1, &workq->task.pending_cnt);
-	CRM_DBG("enq task %p pending_cnt %d",
+	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-
+	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
@@ -164,7 +173,8 @@
 	return rc;
 }
 
-int cam_req_mgr_workq_create(char *name, struct cam_req_mgr_core_workq **workq)
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -189,20 +199,35 @@
 
 		/* Workq attributes initialization */
 		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
+		spin_lock_init(&crm_workq->lock_bh);
+		CRM_DBG("LOCK_DBG workq %s lock %pK",
+			name, &crm_workq->lock_bh);
 
 		/* Task attributes initialization */
-		spin_lock_init(&crm_workq->task.lock);
 		atomic_set(&crm_workq->task.pending_cnt, 0);
 		atomic_set(&crm_workq->task.free_cnt, 0);
-		INIT_LIST_HEAD(&crm_workq->task.process_head);
+		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
+			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
-		memset(crm_workq->task.pool, 0,
-			sizeof(struct crm_workq_task) *
-			CRM_WORKQ_NUM_TASKS);
-		for (i = 0; i < CRM_WORKQ_NUM_TASKS; i++) {
+		crm_workq->task.num_task = num_tasks;
+		crm_workq->task.pool = (struct crm_workq_task *)
+			kzalloc(sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task,
+				GFP_KERNEL);
+		if (!crm_workq->task.pool) {
+			CRM_WARN("Insufficient memory %lu",
+				sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task);
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < crm_workq->task.num_task; i++) {
 			task = &crm_workq->task.pool[i];
 			task->parent = (void *)crm_workq;
 			/* Put all tasks in free pool */
+			list_add_tail(&task->entry,
+			&crm_workq->task.process_head[CRM_TASK_PRIORITY_0]);
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
@@ -213,15 +238,16 @@
 	return 0;
 }
 
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *crm_workq)
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
-	CRM_DBG("destroy workque %p", crm_workq);
-	if (crm_workq) {
-		if (crm_workq->job) {
-			destroy_workqueue(crm_workq->job);
-			crm_workq->job = NULL;
+	CRM_DBG("destroy workque %pK", crm_workq);
+	if (*crm_workq) {
+		crm_workq_clear_q(*crm_workq);
+		if ((*crm_workq)->job) {
+			destroy_workqueue((*crm_workq)->job);
+			(*crm_workq)->job = NULL;
 		}
-		kfree(crm_workq);
-		crm_workq = NULL;
+		kfree(*crm_workq);
+		*crm_workq = NULL;
 	}
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 6b36abc..7d8ca59 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _CAM_WORKER_H_
-#define _CAM_WORKER_H_
+#ifndef _CAM_REQ_MGR_WORKQ_H_
+#define _CAM_REQ_MGR_WORKQ_H_
 
 #include<linux/kernel.h>
 #include<linux/module.h>
@@ -23,99 +23,78 @@
 
 #include "cam_req_mgr_core.h"
 
-/* Macros */
-#define CRM_WORKQ_NUM_TASKS 30
-
-/**enum crm_workq_task_type
- * @codes: to identify which type of task is present
- */
-enum crm_workq_task_type {
-	CRM_WORKQ_TASK_GET_DEV_INFO,
-	CRM_WORKQ_TASK_SETUP_LINK,
-	CRM_WORKQ_TASK_SCHED_REQ,
-	CRM_WORKQ_TASK_DEV_ADD_REQ,
-	CRM_WORKQ_TASK_APPLY_REQ,
-	CRM_WORKQ_TASK_NOTIFY_SOF,
-	CRM_WORKQ_TASK_NOTIFY_ACK,
-	CRM_WORKQ_TASK_NOTIFY_ERR,
-	CRM_WORKQ_TASK_INVALID,
+/* Task priorities, lower the number higher the priority*/
+enum crm_task_priority {
+	CRM_TASK_PRIORITY_0 = 0,
+	CRM_TASK_PRIORITY_1 = 1,
+	CRM_TASK_PRIORITY_MAX = 2,
 };
 
 /** struct crm_workq_task
- * @type: type of task
- * u -
- * @csl_req: contains info of  incoming reqest from CSL to CRM
- * @dev_req: contains tracking info of available req id at device
- * @apply_req: contains info of which request is applied at device
- * @notify_sof: contains notification from IFE to CRM about SOF trigger
- * @notify_err: contains error inf happened while processing request
- * @dev_info: contains info about which device is connected with CRM
- * @link_setup: contains info about new link being setup
- * -
- * @process_cb: registered callback called by workq when task enqueued is ready
- *  for processing in workq thread context
- * @parent: workq's parent is link which is enqqueing taks to this workq
- * @entry: list head of this list entry is worker's empty_head
- * @cancel: if caller has got free task from pool but wants to abort or put
- *  back without using it
- * @priv: when task is enqueuer caller can attach cookie
+ * @priority   : caller can assign priority to task based on type.
+ * @payload    : depending of user of task this payload type will change
+ * @process_cb : registered callback called by workq when task enqueued is
+ *               ready for processing in workq thread context
+ * @parent     : workq's parent is link which is enqueueing tasks to this workq
+ * @entry      : list head of this list entry is worker's empty_head
+ * @cancel     : if caller has got free task from pool but wants to abort
+ *               or put back without using it
+ * @priv       : when task is enqueuer caller can attach priv along which
+ *               it will get in process callback
+ * @ret        : return value in future to use for blocking calls
  */
 struct crm_workq_task {
-	enum crm_workq_task_type type;
-	union {
-		struct cam_req_mgr_sched_request csl_req;
-		struct cam_req_mgr_add_request dev_req;
-		struct cam_req_mgr_apply_request apply_req;
-		struct cam_req_mgr_sof_notify notify_sof;
-		struct cam_req_mgr_error_notify notify_err;
-		struct cam_req_mgr_device_info dev_info;
-		struct cam_req_mgr_core_dev_link_setup link_setup;
-	} u;
-	int (*process_cb)(void *, void *);
-	void *parent;
-	struct list_head entry;
-	uint8_t cancel;
-	void *priv;
+	int32_t                  priority;
+	void                    *payload;
+	int32_t                (*process_cb)(void *, void *);
+	void                    *parent;
+	struct list_head         entry;
+	uint8_t                  cancel;
+	void                    *priv;
+	int32_t                  ret;
 };
 
-/** struct crm_core_worker
- * @work: work token used by workqueue
- * @job: workqueue internal job struct
- *task -
- * @lock: lock for task structs
- * @pending_cnt:  num of tasks pending to be processed
- * @free_cnt:  num of free/available tasks
- * @process_head: list  head of tasks pending process
- * @empty_head: list  head of available tasks which can be used
- * or acquired in order to enqueue a task to workq
- * @pool: pool  of tasks used for handling events in workq context
- *@num_task : size of tasks pool
+/** struct cam_req_mgr_core_workq
+ * @work       : work token used by workqueue
+ * @job        : workqueue internal job struct
+ * task -
+ * @lock       : lock for task structs
+ * @free_cnt   : num of free/available tasks
+ * @empty_head : list head of available tasks which can be used
+ *               or acquired in order to enqueue a task to workq
+ * @pool       : pool of tasks used for handling events in workq context
+ * @num_task   : size of tasks pool
+ * -
  */
 struct cam_req_mgr_core_workq {
-	struct work_struct work;
-	struct workqueue_struct *job;
+	struct work_struct         work;
+	struct workqueue_struct   *job;
+	spinlock_t                 lock_bh;
 
+	/* tasks */
 	struct {
-		spinlock_t lock;
-		atomic_t pending_cnt;
-		atomic_t free_cnt;
+		struct mutex           lock;
+		atomic_t               pending_cnt;
+		atomic_t               free_cnt;
 
-		struct list_head process_head;
-		struct list_head empty_head;
-		struct crm_workq_task pool[CRM_WORKQ_NUM_TASKS];
+		struct list_head       process_head[CRM_TASK_PRIORITY_MAX];
+		struct list_head       empty_head;
+		struct crm_workq_task *pool;
+		uint32_t               num_task;
 	} task;
 };
 
 /**
  * cam_req_mgr_workq_create()
- * @brief: create a workqueue
- * @name: Name of the workque to be allocated,
- * it is combination of session handle and link handle
- * @workq: Double pointer worker
+ * @brief    : create a workqueue
+ * @name     : Name of the workqueue to be allocated, it is combination
+ *             of session handle and link handle
+ * @num_task : Num_tasks to be allocated for workq
+ * @workq    : Double pointer worker
  * This function will allocate and create workqueue and pass
- * the worker pointer to caller.
+ * the workq pointer to caller.
  */
-int cam_req_mgr_workq_create(char *name,
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
 	struct cam_req_mgr_core_workq **workq);
 
 /**
@@ -125,15 +104,18 @@
  * this function will destroy workqueue and clean up resources
  * associated with worker such as tasks.
  */
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *workq);
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
 
 /**
  * cam_req_mgr_workq_enqueue_task()
  * @brief: Enqueue task in worker queue
- * @task: task to be processed by worker
+ * @task : task to be processed by worker
+ * @priv : clients private data
+ * @prio : task priority
  * process callback func
  */
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task);
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio);
 
 /**
  * cam_req_mgr_workq_get_task()
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 1bab010..c147b0b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -52,8 +52,8 @@
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
-#define DEFAULT_UBWC_MALSIZE	1
-#define DEFAULT_UBWC_SWIZZLE	1
+#define DEFAULT_UBWC_MALSIZE	0
+#define DEFAULT_UBWC_SWIZZLE	0
 
 #define DEFAULT_MAXLINEWIDTH	4096
 
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 7215fdf..6601c9a 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1480,22 +1480,22 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_VUI_TIMING_INFO:
+	case HAL_PARAM_VENC_VUI_TIMING_INFO:
 	{
-		struct hfi_h264_vui_timing_info *hfi;
-		struct hal_h264_vui_timing_info *timing_info = pdata;
+		struct hfi_vui_timing_info *hfi;
+		struct hal_vui_timing_info *timing_info = pdata;
 
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+			HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO;
 
-		hfi = (struct hfi_h264_vui_timing_info *)&pkt->
+		hfi = (struct hfi_vui_timing_info *)&pkt->
 			rg_property_data[1];
 		hfi->enable = timing_info->enable;
 		hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
 		hfi->time_scale = timing_info->time_scale;
 
 		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_h264_vui_timing_info);
+			sizeof(struct hfi_vui_timing_info);
 		break;
 	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index c0b6683..006cd49 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -988,6 +988,15 @@
 			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)),
 		.qmenu = iframe_sizes,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+		.name = "Frame Rate based Rate Control",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+	},
 
 };
 
@@ -1131,6 +1140,7 @@
 	int max_hierp_layers;
 	int baselayerid = 0;
 	struct hal_video_signal_info signal_info = {0};
+	struct hal_vui_timing_info vui_timing_info = {0};
 	enum hal_iframesize_type iframesize_type = HAL_IFRAMESIZE_TYPE_DEFAULT;
 
 	if (!inst || !inst->core || !inst->core->device) {
@@ -1219,7 +1229,7 @@
 	{
 		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		inst->clk_data.bitrate = ctrl->val;
 		break;
@@ -1243,7 +1253,7 @@
 
 		property_id = HAL_CONFIG_VENC_MAX_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		break;
 	}
@@ -1849,6 +1859,43 @@
 				ctrl->val);
 		pdata = &iframesize_type;
 		break;
+	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+	{
+		property_id = HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO:
+	{
+		struct v4l2_ctrl *rc_mode;
+		bool cfr = false;
+
+		property_id = HAL_PARAM_VENC_VUI_TIMING_INFO;
+		pdata = &vui_timing_info;
+
+		if (ctrl->val != V4L2_MPEG_VIDC_VIDEO_VUI_TIMING_INFO_ENABLED) {
+			vui_timing_info.enable = 0;
+			break;
+		}
+
+		rc_mode = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+
+		switch (rc_mode->val) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR:
+			cfr = true;
+			break;
+		default:
+			cfr = false;
+		}
+
+		vui_timing_info.enable = 1;
+		vui_timing_info.fixed_frame_rate = cfr;
+		vui_timing_info.time_scale = NSEC_PER_SEC;
+		break;
+	}
 	default:
 		dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id);
 		rc = -ENOTSUPP;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index cd518fb..b80aa08 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -831,7 +831,7 @@
 	list_for_each_entry(inst, &core->instances, list) {
 		u32 cycles, lp_cycles;
 
-		if (!(inst->clk_data.core_id && core_id))
+		if (!(inst->clk_data.core_id & core_id))
 			continue;
 		if (inst->session_type == MSM_VIDC_DECODER) {
 			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 8752378..474c2fb6 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -187,7 +187,7 @@
 	HAL_PARAM_VDEC_SYNC_FRAME_DECODE,
 	HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
 	HAL_CONFIG_VENC_MAX_BITRATE,
-	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
+	HAL_PARAM_VENC_VUI_TIMING_INFO,
 	HAL_PARAM_VENC_GENERATE_AUDNAL,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
@@ -813,7 +813,7 @@
 };
 
 
-struct hal_h264_vui_timing_info {
+struct hal_vui_timing_info {
 	u32 enable;
 	u32 fixed_frame_rate;
 	u32 time_scale;
@@ -1036,7 +1036,7 @@
 	struct hal_codec_supported codec_supported;
 	struct hal_multi_view_select multi_view_select;
 	struct hal_timestamp_scale timestamp_scale;
-	struct hal_h264_vui_timing_info h264_vui_timing_info;
+	struct hal_vui_timing_info vui_timing_info;
 	struct hal_preserve_text_quality preserve_text_quality;
 	struct hal_buffer_info buffer_info;
 	struct hal_buffer_alloc_mode buffer_alloc_mode;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 77164be..81b4d91 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -283,7 +283,7 @@
 	 (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
 #define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
-#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO	\
+#define HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
@@ -589,7 +589,7 @@
 	u32 matrix_coeffs;
 };
 
-struct hfi_h264_vui_timing_info {
+struct hfi_vui_timing_info {
 	u32 enable;
 	u32 fixed_frame_rate;
 	u32 time_scale;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index e3f35bc..4e111cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -500,9 +500,9 @@
 
 	for (i = 0; i < request->n_ssids; i++) {
 		wil_dbg_misc(wil, "SSID[%d]", i);
-		print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-				     request->ssids[i].ssid,
-				     request->ssids[i].ssid_len);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ssids[i].ssid,
+				  request->ssids[i].ssid_len, true);
 	}
 
 	if (request->n_ssids)
@@ -539,8 +539,8 @@
 	}
 
 	if (request->ie_len)
-		print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
-				     request->ie, request->ie_len);
+		wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ie, request->ie_len, true);
 	else
 		wil_dbg_misc(wil, "Scan has no IE's\n");
 
@@ -764,6 +764,8 @@
 	rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
 	if (rc == 0) {
 		netif_carrier_on(ndev);
+		wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+		wil->bss = bss;
 		/* Connect can take lots of time */
 		mod_timer(&wil->connect_timer,
 			  jiffies + msecs_to_jiffies(2000));
@@ -792,6 +794,7 @@
 		return 0;
 	}
 
+	wil->locally_generated_disc = true;
 	rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
 		      WMI_DISCONNECT_EVENTID, NULL, 0,
 		      WIL6210_DISCONNECT_TO_MS);
@@ -845,7 +848,8 @@
 	 */
 
 	wil_dbg_misc(wil, "mgmt_tx\n");
-	print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+	wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+			  len, true);
 
 	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
 	if (!cmd) {
@@ -1178,18 +1182,18 @@
 
 static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
 {
-	print_hex_dump_bytes("head     ", DUMP_PREFIX_OFFSET,
-			     b->head, b->head_len);
-	print_hex_dump_bytes("tail     ", DUMP_PREFIX_OFFSET,
-			     b->tail, b->tail_len);
-	print_hex_dump_bytes("BCON IE  ", DUMP_PREFIX_OFFSET,
-			     b->beacon_ies, b->beacon_ies_len);
-	print_hex_dump_bytes("PROBE    ", DUMP_PREFIX_OFFSET,
-			     b->probe_resp, b->probe_resp_len);
-	print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
-			     b->proberesp_ies, b->proberesp_ies_len);
-	print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
-			     b->assocresp_ies, b->assocresp_ies_len);
+	wil_hex_dump_misc("head     ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->head, b->head_len, true);
+	wil_hex_dump_misc("tail     ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->tail, b->tail_len, true);
+	wil_hex_dump_misc("BCON IE  ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->beacon_ies, b->beacon_ies_len, true);
+	wil_hex_dump_misc("PROBE    ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->probe_resp, b->probe_resp_len, true);
+	wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->proberesp_ies, b->proberesp_ies_len, true);
+	wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->assocresp_ies, b->assocresp_ies_len, true);
 }
 
 /* internal functions for device reset and starting AP */
@@ -1283,6 +1287,7 @@
 	wil->pbss = pbss;
 
 	netif_carrier_on(ndev);
+	wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
 
 	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
 	if (rc)
@@ -1298,6 +1303,7 @@
 	wmi_pcp_stop(wil);
 err_pcp_start:
 	netif_carrier_off(ndev);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 out:
 	mutex_unlock(&wil->mutex);
 	return rc;
@@ -1383,8 +1389,8 @@
 	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
 		     info->dtim_period);
 	wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
-	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-			     info->ssid, info->ssid_len);
+	wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+			  info->ssid, info->ssid_len, true);
 	wil_print_bcon_data(bcon);
 	wil_print_crypto(wil, crypto);
 
@@ -1404,6 +1410,7 @@
 	wil_dbg_misc(wil, "stop_ap\n");
 
 	netif_carrier_off(ndev);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
 	mutex_lock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3e8cdf1..5648ebb 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -524,9 +524,8 @@
 	if (!buf)
 		return -ENOMEM;
 
-	wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
-				    (const volatile void __iomem *)
-				    wil_blob->blob.data + pos, count);
+	wil_memcpy_fromio_32(buf, (const void __iomem *)
+			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
 	kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2c48419..36959a3 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -30,8 +30,8 @@
 module_param(debug_fw, bool, 0444);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
-static bool oob_mode;
-module_param(oob_mode, bool, 0444);
+static u8 oob_mode;
+module_param(oob_mode, byte, 0444);
 MODULE_PARM_DESC(oob_mode,
 		 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
 
@@ -135,14 +135,6 @@
 		*d++ = __raw_readl(s++);
 }
 
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
-				 const volatile void __iomem *src, size_t count)
-{
-	wil_halp_vote(wil);
-	wil_memcpy_fromio_32(dst, src, count);
-	wil_halp_unvote(wil);
-}
-
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count)
 {
@@ -153,15 +145,6 @@
 		__raw_writel(*s++, d++);
 }
 
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
-			       volatile void __iomem *dst,
-			       const void *src, size_t count)
-{
-	wil_halp_vote(wil);
-	wil_memcpy_toio_32(dst, src, count);
-	wil_halp_unvote(wil);
-}
-
 static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
 			       u16 reason_code, bool from_event)
 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -274,15 +257,20 @@
 		wil_bcast_fini(wil);
 		wil_update_net_queues_bh(wil, NULL, true);
 		netif_carrier_off(ndev);
+		wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 
 		if (test_bit(wil_status_fwconnected, wil->status)) {
 			clear_bit(wil_status_fwconnected, wil->status);
 			cfg80211_disconnected(ndev, reason_code,
-					      NULL, 0, false, GFP_KERNEL);
+					      NULL, 0,
+					      wil->locally_generated_disc,
+					      GFP_KERNEL);
+			wil->locally_generated_disc = false;
 		} else if (test_bit(wil_status_fwconnecting, wil->status)) {
 			cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
 						WLAN_STATUS_UNSPECIFIED_FAILURE,
 						GFP_KERNEL);
+			wil->bss = NULL;
 		}
 		clear_bit(wil_status_fwconnecting, wil->status);
 		break;
@@ -304,10 +292,34 @@
 {
 	struct wil6210_priv *wil = container_of(work,
 			struct wil6210_priv, disconnect_worker);
+	struct net_device *ndev = wil_to_ndev(wil);
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_disconnect_event evt;
+	} __packed reply;
 
-	mutex_lock(&wil->mutex);
-	_wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
-	mutex_unlock(&wil->mutex);
+	if (test_bit(wil_status_fwconnected, wil->status))
+		/* connect succeeded after all */
+		return;
+
+	if (!test_bit(wil_status_fwconnecting, wil->status))
+		/* already disconnected */
+		return;
+
+	rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+		      WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
+		      WIL6210_DISCONNECT_TO_MS);
+	if (rc) {
+		wil_err(wil, "disconnect error %d\n", rc);
+		return;
+	}
+
+	wil_update_net_queues_bh(wil, NULL, true);
+	netif_carrier_off(ndev);
+	cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
+				WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
+	clear_bit(wil_status_fwconnecting, wil->status);
 }
 
 static void wil_connect_timer_fn(ulong x)
@@ -557,6 +569,12 @@
 	return -EAGAIN;
 }
 
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
+{
+	if (wil->platform_ops.bus_request)
+		wil->platform_ops.bus_request(wil->platform_handle, kbps);
+}
+
 /**
  * wil6210_disconnect - disconnect one connection
  * @wil: driver context
@@ -610,13 +628,25 @@
 	wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
-static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
 {
-	wil_info(wil, "enable=%d\n", enable);
-	if (enable)
+	wil_info(wil, "oob_mode to %d\n", mode);
+	switch (mode) {
+	case 0:
+		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
+		      BIT_USER_OOB_R2_MODE);
+		break;
+	case 1:
+		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
 		wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
-	else
+		break;
+	case 2:
 		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+		wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+		break;
+	default:
+		wil_err(wil, "invalid oob_mode: %d\n", mode);
+	}
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -1073,9 +1103,7 @@
 	napi_enable(&wil->napi_tx);
 	set_bit(wil_status_napi_en, wil->status);
 
-	if (wil->platform_ops.bus_request)
-		wil->platform_ops.bus_request(wil->platform_handle,
-					      WIL_MAX_BUS_REQUEST_KBPS);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 
 	return 0;
 }
@@ -1099,8 +1127,7 @@
 
 	set_bit(wil_status_resetting, wil->status);
 
-	if (wil->platform_ops.bus_request)
-		wil->platform_ops.bus_request(wil->platform_handle, 0);
+	wil6210_bus_request(wil, 0);
 
 	wil_disable_irq(wil);
 	if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
@@ -1163,6 +1190,7 @@
 		    wil->halp.ref_cnt);
 
 	if (++wil->halp.ref_cnt == 1) {
+		reinit_completion(&wil->halp.comp);
 		wil6210_set_halp(wil);
 		rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
 		if (!rc) {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 7260bef..2ae4fe8 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -71,6 +71,11 @@
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 0;
+	}
+
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
 		rc = wil_down(wil);
@@ -86,10 +91,14 @@
 
 	if (wil->platform_ops.suspend) {
 		rc = wil->platform_ops.suspend(wil->platform_handle);
-		if (rc)
+		if (rc) {
 			wil_enable_irq(wil);
+			goto out;
+		}
 	}
 
+	set_bit(wil_status_suspended, wil->status);
+
 out:
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
@@ -117,10 +126,13 @@
 
 	/* if netif up, bring hardware up
 	 * During open(), IFF_UP set after actual device method
-	 * invocation. This prevent recursive call to wil_up()
+	 * invocation. This prevents recursive call to wil_up().
+	 * wil_status_suspended will be cleared in wil_reset
 	 */
 	if (ndev->flags & IFF_UP)
 		rc = wil_up(wil);
+	else
+		clear_bit(wil_status_suspended, wil->status);
 
 out:
 	wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4bccef3..734449d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -41,6 +41,7 @@
 #define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
 #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
+#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
 /**
@@ -140,6 +141,7 @@
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
+	#define BIT_USER_OOB_R2_MODE		BIT(30)
 #define RGF_USER_USAGE_8		(0x880020)
 	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
 	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
@@ -413,6 +415,7 @@
 	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
+	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_last /* keep last */
 };
 
@@ -616,6 +619,8 @@
 	u16 channel; /* relevant in AP mode */
 	int sinfo_gen;
 	u32 ap_isolate; /* no intra-BSS communication */
+	struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
+	int locally_generated_disc; /* relevant in STA mode */
 	/* interrupt moderation */
 	u32 tx_max_burst_duration;
 	u32 tx_interframe_timeout;
@@ -771,6 +776,12 @@
 			 print_hex_dump_debug("DBG[ WMI]" prefix_str,\
 					prefix_type, rowsize,	\
 					groupsize, buf, len, ascii)
+
+#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize,	\
+			  groupsize, buf, len, ascii)		\
+			  print_hex_dump_debug("DBG[MISC]" prefix_str,\
+					prefix_type, rowsize,	\
+					groupsize, buf, len, ascii)
 #else /* defined(CONFIG_DYNAMIC_DEBUG) */
 static inline
 void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
@@ -783,18 +794,18 @@
 		      int groupsize, const void *buf, size_t len, bool ascii)
 {
 }
+
+static inline
+void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize,
+		       int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
 #endif /* defined(CONFIG_DYNAMIC_DEBUG) */
 
 void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
 			  size_t count);
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count);
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
-				 const volatile void __iomem *src,
-				 size_t count);
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
-			       volatile void __iomem *dst,
-			       const void *src, size_t count);
 
 void *wil_if_alloc(struct device *dev);
 void wil_if_free(struct wil6210_priv *wil);
@@ -910,7 +921,7 @@
 		 u8 type);
 int wmi_abort_scan(struct wil6210_priv *wil);
 void wil_abort_scan(struct wil6210_priv *wil, bool sync);
-
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
 			u16 reason_code, bool from_event);
 void wil_probe_client_flush(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0ede7f7..31d6ab9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -566,6 +566,7 @@
 	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
 		if (rc) {
 			netif_carrier_off(ndev);
+			wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 			wil_err(wil, "cfg80211_connect_result with failure\n");
 			cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
 						NULL, 0,
@@ -573,12 +574,16 @@
 						GFP_KERNEL);
 			goto out;
 		} else {
-			cfg80211_connect_result(ndev, evt->bssid,
-						assoc_req_ie, assoc_req_ielen,
-						assoc_resp_ie, assoc_resp_ielen,
-						WLAN_STATUS_SUCCESS,
-						GFP_KERNEL);
+			struct wiphy *wiphy = wil_to_wiphy(wil);
+
+			cfg80211_ref_bss(wiphy, wil->bss);
+			cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
+					     assoc_req_ie, assoc_req_ielen,
+					     assoc_resp_ie, assoc_resp_ielen,
+					     WLAN_STATUS_SUCCESS, GFP_KERNEL,
+					     NL80211_TIMEOUT_UNSPECIFIED);
 		}
+		wil->bss = NULL;
 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
 		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
 		if (rc) {
@@ -1524,6 +1529,7 @@
 
 	wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
 
+	wil->locally_generated_disc = true;
 	if (del_sta) {
 		ether_addr_copy(del_sta_cmd.dst_mac, mac);
 		rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
@@ -1765,14 +1771,19 @@
 
 void wmi_event_flush(struct wil6210_priv *wil)
 {
+	ulong flags;
 	struct pending_wmi_event *evt, *t;
 
 	wil_dbg_wmi(wil, "event_flush\n");
 
+	spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
 	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
 		list_del(&evt->list);
 		kfree(evt);
 	}
+
+	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 67adf58..30c31a8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -526,7 +526,6 @@
 	msm_mux_reserved30,
 	msm_mux_qup11,
 	msm_mux_qup14,
-	msm_mux_phase_flag3,
 	msm_mux_reserved96,
 	msm_mux_ldo_en,
 	msm_mux_reserved97,
@@ -543,17 +542,13 @@
 	msm_mux_phase_flag5,
 	msm_mux_reserved103,
 	msm_mux_reserved104,
-	msm_mux_pcie1_forceon,
 	msm_mux_uim2_data,
 	msm_mux_qup13,
 	msm_mux_reserved105,
-	msm_mux_pcie1_pwren,
 	msm_mux_uim2_clk,
 	msm_mux_reserved106,
-	msm_mux_pcie1_auxen,
 	msm_mux_uim2_reset,
 	msm_mux_reserved107,
-	msm_mux_pcie1_button,
 	msm_mux_uim2_present,
 	msm_mux_reserved108,
 	msm_mux_uim1_data,
@@ -564,7 +559,6 @@
 	msm_mux_reserved111,
 	msm_mux_uim1_present,
 	msm_mux_reserved112,
-	msm_mux_pcie1_prsnt2,
 	msm_mux_uim_batt,
 	msm_mux_edp_hot,
 	msm_mux_reserved113,
@@ -587,7 +581,6 @@
 	msm_mux_reserved123,
 	msm_mux_reserved124,
 	msm_mux_reserved125,
-	msm_mux_sd_card,
 	msm_mux_reserved126,
 	msm_mux_reserved127,
 	msm_mux_reserved128,
@@ -647,7 +640,6 @@
 	msm_mux_reserved42,
 	msm_mux_reserved43,
 	msm_mux_reserved44,
-	msm_mux_bt_reset,
 	msm_mux_qup6,
 	msm_mux_reserved45,
 	msm_mux_reserved46,
@@ -672,7 +664,6 @@
 	msm_mux_gcc_gp1,
 	msm_mux_phase_flag18,
 	msm_mux_reserved57,
-	msm_mux_ssc_irq,
 	msm_mux_phase_flag19,
 	msm_mux_reserved58,
 	msm_mux_phase_flag20,
@@ -731,10 +722,8 @@
 	msm_mux_reserved82,
 	msm_mux_reserved83,
 	msm_mux_reserved84,
-	msm_mux_pcie1_pwrfault,
 	msm_mux_qup5,
 	msm_mux_reserved85,
-	msm_mux_pcie1_mrl,
 	msm_mux_reserved86,
 	msm_mux_reserved87,
 	msm_mux_reserved88,
@@ -772,6 +761,7 @@
 	msm_mux_reserved95,
 	msm_mux_tsif2_sync,
 	msm_mux_sdc40,
+	msm_mux_phase_flag3,
 	msm_mux_NA,
 };
 
@@ -781,19 +771,24 @@
 	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
 	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
 	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
-	"gpio36", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
-	"gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50", "gpio51",
-	"gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57", "gpio64",
-	"gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
-	"gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio81",
-	"gpio82", "gpio83", "gpio84", "gpio87", "gpio88", "gpio89", "gpio90",
-	"gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
-	"gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
-	"gpio109", "gpio110", "gpio111", "gpio112", "gpio114", "gpio115",
-	"gpio116", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
-	"gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
-	"gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
-	"gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149",
 };
 static const char * const qup0_groups[] = {
 	"gpio0", "gpio1", "gpio2", "gpio3",
@@ -1075,9 +1070,6 @@
 static const char * const qup14_groups[] = {
 	"gpio31", "gpio32", "gpio33", "gpio34",
 };
-static const char * const phase_flag3_groups[] = {
-	"gpio96",
-};
 static const char * const reserved96_groups[] = {
 	"gpio96",
 };
@@ -1109,7 +1101,7 @@
 	"gpio101",
 };
 static const char * const pci_e1_groups[] = {
-	"gpio102", "gpio103", "gpio104",
+	"gpio102", "gpio103",
 };
 static const char * const prng_rosc_groups[] = {
 	"gpio102",
@@ -1126,9 +1118,6 @@
 static const char * const reserved104_groups[] = {
 	"gpio104",
 };
-static const char * const pcie1_forceon_groups[] = {
-	"gpio105",
-};
 static const char * const uim2_data_groups[] = {
 	"gpio105",
 };
@@ -1138,27 +1127,18 @@
 static const char * const reserved105_groups[] = {
 	"gpio105",
 };
-static const char * const pcie1_pwren_groups[] = {
-	"gpio106",
-};
 static const char * const uim2_clk_groups[] = {
 	"gpio106",
 };
 static const char * const reserved106_groups[] = {
 	"gpio106",
 };
-static const char * const pcie1_auxen_groups[] = {
-	"gpio107",
-};
 static const char * const uim2_reset_groups[] = {
 	"gpio107",
 };
 static const char * const reserved107_groups[] = {
 	"gpio107",
 };
-static const char * const pcie1_button_groups[] = {
-	"gpio108",
-};
 static const char * const uim2_present_groups[] = {
 	"gpio108",
 };
@@ -1189,9 +1169,6 @@
 static const char * const reserved112_groups[] = {
 	"gpio112",
 };
-static const char * const pcie1_prsnt2_groups[] = {
-	"gpio113",
-};
 static const char * const uim_batt_groups[] = {
 	"gpio113",
 };
@@ -1259,9 +1236,6 @@
 static const char * const reserved125_groups[] = {
 	"gpio125",
 };
-static const char * const sd_card_groups[] = {
-	"gpio126",
-};
 static const char * const reserved126_groups[] = {
 	"gpio126",
 };
@@ -1380,7 +1354,7 @@
 	"gpio34",
 };
 static const char * const pci_e0_groups[] = {
-	"gpio35", "gpio36", "gpio37",
+	"gpio35", "gpio36",
 };
 static const char * const jitter_bist_groups[] = {
 	"gpio35",
@@ -1439,9 +1413,6 @@
 static const char * const reserved44_groups[] = {
 	"gpio44",
 };
-static const char * const bt_reset_groups[] = {
-	"gpio45",
-};
 static const char * const qup6_groups[] = {
 	"gpio45", "gpio46", "gpio47", "gpio48",
 };
@@ -1514,11 +1485,6 @@
 static const char * const reserved57_groups[] = {
 	"gpio57",
 };
-static const char * const ssc_irq_groups[] = {
-	"gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio78",
-	"gpio79", "gpio80", "gpio117", "gpio118", "gpio119", "gpio120",
-	"gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
-};
 static const char * const phase_flag19_groups[] = {
 	"gpio58",
 };
@@ -1693,18 +1659,12 @@
 static const char * const reserved84_groups[] = {
 	"gpio84",
 };
-static const char * const pcie1_pwrfault_groups[] = {
-	"gpio85",
-};
 static const char * const qup5_groups[] = {
 	"gpio85", "gpio86", "gpio87", "gpio88",
 };
 static const char * const reserved85_groups[] = {
 	"gpio85",
 };
-static const char * const pcie1_mrl_groups[] = {
-	"gpio86",
-};
 static const char * const reserved86_groups[] = {
 	"gpio86",
 };
@@ -1816,6 +1776,9 @@
 static const char * const sdc40_groups[] = {
 	"gpio96",
 };
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
 
 static const struct msm_function sdm845_functions[] = {
 	FUNCTION(gpio),
@@ -1912,7 +1875,6 @@
 	FUNCTION(reserved30),
 	FUNCTION(qup11),
 	FUNCTION(qup14),
-	FUNCTION(phase_flag3),
 	FUNCTION(reserved96),
 	FUNCTION(ldo_en),
 	FUNCTION(reserved97),
@@ -1929,17 +1891,13 @@
 	FUNCTION(phase_flag5),
 	FUNCTION(reserved103),
 	FUNCTION(reserved104),
-	FUNCTION(pcie1_forceon),
 	FUNCTION(uim2_data),
 	FUNCTION(qup13),
 	FUNCTION(reserved105),
-	FUNCTION(pcie1_pwren),
 	FUNCTION(uim2_clk),
 	FUNCTION(reserved106),
-	FUNCTION(pcie1_auxen),
 	FUNCTION(uim2_reset),
 	FUNCTION(reserved107),
-	FUNCTION(pcie1_button),
 	FUNCTION(uim2_present),
 	FUNCTION(reserved108),
 	FUNCTION(uim1_data),
@@ -1950,7 +1908,6 @@
 	FUNCTION(reserved111),
 	FUNCTION(uim1_present),
 	FUNCTION(reserved112),
-	FUNCTION(pcie1_prsnt2),
 	FUNCTION(uim_batt),
 	FUNCTION(edp_hot),
 	FUNCTION(reserved113),
@@ -1973,7 +1930,6 @@
 	FUNCTION(reserved123),
 	FUNCTION(reserved124),
 	FUNCTION(reserved125),
-	FUNCTION(sd_card),
 	FUNCTION(reserved126),
 	FUNCTION(reserved127),
 	FUNCTION(reserved128),
@@ -2033,7 +1989,6 @@
 	FUNCTION(reserved42),
 	FUNCTION(reserved43),
 	FUNCTION(reserved44),
-	FUNCTION(bt_reset),
 	FUNCTION(qup6),
 	FUNCTION(reserved45),
 	FUNCTION(reserved46),
@@ -2058,7 +2013,6 @@
 	FUNCTION(gcc_gp1),
 	FUNCTION(phase_flag18),
 	FUNCTION(reserved57),
-	FUNCTION(ssc_irq),
 	FUNCTION(phase_flag19),
 	FUNCTION(reserved58),
 	FUNCTION(phase_flag20),
@@ -2117,10 +2071,8 @@
 	FUNCTION(reserved82),
 	FUNCTION(reserved83),
 	FUNCTION(reserved84),
-	FUNCTION(pcie1_pwrfault),
 	FUNCTION(qup5),
 	FUNCTION(reserved85),
-	FUNCTION(pcie1_mrl),
 	FUNCTION(reserved86),
 	FUNCTION(reserved87),
 	FUNCTION(reserved88),
@@ -2158,6 +2110,7 @@
 	FUNCTION(reserved95),
 	FUNCTION(tsif2_sync),
 	FUNCTION(sdc40),
+	FUNCTION(phase_flag3),
 };
 
 static const struct msm_pingroup sdm845_groups[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index fb42ef7..f5d8227 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -3546,16 +3546,32 @@
 	ipa_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
 	if (ipa_ctx->ipa_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with unlocked lock to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa_ctx->ipa_active_clients.cnt++;
+		ipa2_active_clients_log_inc(&log_info, false);
+		ipa_active_clients_unlock();
+
 		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
 		if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
 			if (msm_bus_scale_client_update_request(
 			    ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
 				WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG("clocks are gated, not setting rate\n");
+		 ipa_active_clients_unlock();
 	}
-	ipa_active_clients_unlock();
 	IPADBG("Done\n");
+
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index cd575fe..5568f8b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,6 +51,7 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_BAM_STOP_MAX_RETRY 10
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 01eea36..9a3c146 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -591,6 +591,7 @@
 {
 	int index;
 	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	int retries = 0;
 
 	mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
 
@@ -600,6 +601,7 @@
 		return -EBADF;
 	}
 
+send_cmd:
 	init_completion(&ipa_ctx->uc_ctx.uc_completion);
 
 	ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
@@ -659,6 +661,19 @@
 	}
 
 	if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+		if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR ==
+			ipa_ctx->uc_ctx.uc_status) {
+			retries++;
+			if (retries == IPA_BAM_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+			} else {
+				/* sleep for short period to flush IPA */
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+				goto send_cmd;
+			}
+		}
+
 		IPAERR("Recevied status %u, Expected status %u\n",
 			ipa_ctx->uc_ctx.uc_status, expected_status);
 		ipa_ctx->uc_ctx.pending_cmd = -1;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
index 3bec471..a98d602 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -441,7 +441,7 @@
 
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status -  Values that represent
  * offload related command response status to be sent to CPU.
  */
 enum ipa_hw_2_cpu_offload_cmd_resp_status {
@@ -478,6 +478,47 @@
 };
 
 /**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
  * struct IpaHwSetUpCmd  -
  *
  *
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 128674a..b7815cb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -111,47 +111,6 @@
 };
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
- * command response status to be sent to CPU.
- */
-enum ipa_hw_2_cpu_cmd_resp_status {
-	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
-	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
-	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
-	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
-	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
-	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
-	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
-	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
-	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
-	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
-	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
-	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
-	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
-	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
-	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
-	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
-	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
-};
-
-/**
  * enum ipa_hw_wdi_errors - WDI specific error types.
  * @IPA_HW_WDI_ERROR_NONE : No error persists
  * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 30f5712..862b147 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3569,16 +3569,32 @@
 	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
 	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with the lock released to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa3_ctx->ipa3_active_clients.cnt++;
+		ipa3_active_clients_log_inc(&log_info, false);
+		ipa3_active_clients_unlock();
+
 		if (ipa3_clk)
 			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
 		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
 			ipa3_get_bus_vote()))
 			WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG_LOW("clocks are gated, not setting rate\n");
+		ipa3_active_clients_unlock();
 	}
-	ipa3_active_clients_unlock();
 	IPADBG_LOW("Done\n");
+
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 19c3de4a..73738bf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -53,6 +53,8 @@
 static bool workqueues_stopped;
 static bool ipa3_modem_init_cmplt;
 static bool first_time_handshake;
+struct mutex ipa3_qmi_lock;
+
 /* QMI A5 service */
 
 static struct msg_desc ipa3_indication_reg_req_desc = {
@@ -610,12 +612,17 @@
 		req->filter_spec_ex_list_len);
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
-		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
-			req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
@@ -655,12 +662,17 @@
 		req->filter_spec_ex_list_len);
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
-		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
-		req, sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len =
 		QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01;
@@ -796,12 +808,17 @@
 		return -EINVAL;
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
-		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
-		req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
-	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
-	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len =
 	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
@@ -1339,3 +1356,13 @@
 		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
 }
 
+void ipa3_qmi_init(void)
+{
+	mutex_init(&ipa3_qmi_lock);
+}
+
+void ipa3_qmi_cleanup(void)
+{
+	mutex_destroy(&ipa3_qmi_lock);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 4fde261..6cd82f8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -204,6 +204,10 @@
 
 void ipa3_q6_handshake_complete(bool ssr_bootup);
 
+void ipa3_qmi_init(void);
+
+void ipa3_qmi_cleanup(void);
+
 #else /* CONFIG_RMNET_IPA3 */
 
 static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
@@ -316,6 +320,14 @@
 
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
+static inline void ipa3_qmi_init(void)
+{
+}
+
+static inline void ipa3_qmi_cleanup(void)
+{
+}
+
 #endif /* CONFIG_RMNET_IPA3 */
 
 #endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 56e7718..a15bd04 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3206,6 +3206,9 @@
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
+
+	ipa3_qmi_init();
+
 	/* Register for Modem SSR */
 	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
 			SUBSYS_MODEM,
@@ -3219,7 +3222,7 @@
 static void __exit ipa3_wwan_cleanup(void)
 {
 	int ret;
-
+	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 47da1b3..5595b7b 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -36,7 +36,7 @@
 #define WIGIG_VENDOR (0x1ae9)
 #define WIGIG_DEVICE (0x0310)
 
-#define SMMU_BASE	0x10000000 /* Device address range base */
+#define SMMU_BASE	0x20000000 /* Device address range base */
 #define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
 
 #define WIGIG_ENABLE_DELAY	50
@@ -93,9 +93,12 @@
 
 	/* SMMU */
 	bool use_smmu; /* have SMMU enabled? */
-	int smmu_bypass;
+	int smmu_s1_en;
 	int smmu_fast_map;
+	int smmu_coherent;
 	struct dma_iommu_mapping *mapping;
+	u32 smmu_base;
+	u32 smmu_size;
 
 	/* bus frequency scaling */
 	struct msm_bus_scale_pdata *bus_scale;
@@ -638,15 +641,20 @@
 {
 	int atomic_ctx = 1;
 	int rc;
+	int force_pt_coherent = 1;
+	int smmu_bypass = !ctx->smmu_s1_en;
+	dma_addr_t iova_base = 0;
+	dma_addr_t iova_end =  ctx->smmu_base + ctx->smmu_size - 1;
+	struct iommu_domain_geometry geometry;
 
 	if (!ctx->use_smmu)
 		return 0;
 
-	dev_info(ctx->dev, "Initialize SMMU, bypass = %d, fastmap = %d\n",
-		 ctx->smmu_bypass, ctx->smmu_fast_map);
+	dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
+		 smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
 
 	ctx->mapping = arm_iommu_create_mapping(&platform_bus_type,
-						SMMU_BASE, SMMU_SIZE);
+						ctx->smmu_base, ctx->smmu_size);
 	if (IS_ERR_OR_NULL(ctx->mapping)) {
 		rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
 		dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
@@ -662,23 +670,50 @@
 		goto release_mapping;
 	}
 
-	if (ctx->smmu_bypass) {
+	if (smmu_bypass) {
 		rc = iommu_domain_set_attr(ctx->mapping->domain,
 					   DOMAIN_ATTR_S1_BYPASS,
-					   &ctx->smmu_bypass);
+					   &smmu_bypass);
 		if (rc) {
 			dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
 				rc);
 			goto release_mapping;
 		}
-	} else if (ctx->smmu_fast_map) {
-		rc = iommu_domain_set_attr(ctx->mapping->domain,
-					   DOMAIN_ATTR_FAST,
-					   &ctx->smmu_fast_map);
-		if (rc) {
-			dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
-				rc);
-			goto release_mapping;
+	} else {
+		/* Set dma-coherent and page table coherency */
+		if (ctx->smmu_coherent) {
+			arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+				   DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+				   &force_pt_coherent);
+			if (rc) {
+				dev_err(ctx->dev,
+					"Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
+		}
+
+		if (ctx->smmu_fast_map) {
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+						   DOMAIN_ATTR_FAST,
+						   &ctx->smmu_fast_map);
+			if (rc) {
+				dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
+			memset(&geometry, 0, sizeof(geometry));
+			geometry.aperture_start = iova_base;
+			geometry.aperture_end = iova_end;
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+						   DOMAIN_ATTR_GEOMETRY,
+						   &geometry);
+			if (rc) {
+				dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
 		}
 	}
 
@@ -900,6 +935,7 @@
 	struct device_node *of_node = dev->of_node;
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
+	u32 smmu_mapping[2];
 	int rc;
 	u32 val;
 
@@ -954,8 +990,27 @@
 	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
 
-	ctx->smmu_bypass = 1;
-	ctx->smmu_fast_map = 0;
+	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
+	if (ctx->smmu_s1_en) {
+		ctx->smmu_fast_map = of_property_read_bool(
+						of_node, "qcom,smmu-fast-map");
+		ctx->smmu_coherent = of_property_read_bool(
+						of_node, "qcom,smmu-coherent");
+	}
+	rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
+			smmu_mapping, 2);
+	if (rc) {
+		dev_err(ctx->dev,
+			"Failed to read base/size smmu addresses %d, fallback to default\n",
+			rc);
+		ctx->smmu_base = SMMU_BASE;
+		ctx->smmu_size = SMMU_SIZE;
+	} else {
+		ctx->smmu_base = smmu_mapping[0];
+		ctx->smmu_size = smmu_mapping[1];
+	}
+	dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
+		ctx->smmu_base, ctx->smmu_size);
 
 	/*== execute ==*/
 	/* turn device on */
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 79ea712..362375f 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -20,6 +20,16 @@
 	  The driver reports the charger status via the power supply framework.
 	  A charger status change triggers an IRQ via the device STAT pin.
 
+config SMB1355_SLAVE_CHARGER
+	tristate "SMB1355 Slave Battery Charger"
+	depends on MFD_I2C_PMIC
+	help
+	  Say Y to include support for SMB1355 Battery Charger.
+	  SMB1355 is a single phase 5A battery charger.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
 config SMB1351_USB_CHARGER
 	tristate "smb1351 usb charger (with VBUS detection)"
 	depends on I2C
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 171444f..bc19b24 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_QPNP_FG_GEN3)     += qpnp-fg-gen3.o fg-memif.o fg-util.o
 obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
+obj-$(CONFIG_SMB1355_SLAVE_CHARGER)   += smb1355-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
 obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
new file mode 100644
index 0000000..d5fff74
--- /dev/null
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -0,0 +1,675 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "SMB1355: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
+
+#define SMB1355_DEFAULT_FCC_UA 1000000
+
+/* SMB1355 registers, different than mentioned in smb-reg.h */
+
+#define CHGR_BASE	0x1000
+#define BATIF_BASE	0x1200
+#define USBIN_BASE	0x1300
+#define MISC_BASE	0x1600
+
+#define BATTERY_STATUS_2_REG			(CHGR_BASE + 0x0B)
+#define DISABLE_CHARGING_BIT			BIT(3)
+
+#define BATTERY_STATUS_3_REG			(CHGR_BASE + 0x0C)
+#define BATT_GT_PRE_TO_FAST_BIT			BIT(4)
+#define ENABLE_CHARGING_BIT			BIT(3)
+
+#define CHGR_CFG2_REG				(CHGR_BASE + 0x51)
+#define CHG_EN_SRC_BIT				BIT(7)
+#define CHG_EN_POLARITY_BIT			BIT(6)
+
+#define CFG_REG					(CHGR_BASE + 0x53)
+#define CHG_OPTION_PIN_TRIM_BIT			BIT(7)
+#define BATN_SNS_CFG_BIT			BIT(4)
+#define CFG_TAPER_DIS_AFVC_BIT			BIT(3)
+#define BATFET_SHUTDOWN_CFG_BIT			BIT(2)
+#define VDISCHG_EN_CFG_BIT			BIT(1)
+#define VCHG_EN_CFG_BIT				BIT(0)
+
+#define FAST_CHARGE_CURRENT_CFG_REG		(CHGR_BASE + 0x61)
+#define FAST_CHARGE_CURRENT_SETTING_MASK	GENMASK(7, 0)
+
+#define CHGR_BATTOV_CFG_REG			(CHGR_BASE + 0x70)
+#define BATTOV_SETTING_MASK			GENMASK(7, 0)
+
+#define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT			BIT(0)
+
+#define WD_CFG_REG				(MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
+#define BARK_WDOG_INT_EN_BIT			BIT(6)
+#define BITE_WDOG_INT_EN_BIT			BIT(5)
+#define WDOG_IRQ_SFT_BIT			BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
+
+struct smb_chg_param {
+	const char	*name;
+	u16		reg;
+	int		min_u;
+	int		max_u;
+	int		step_u;
+};
+
+struct smb_params {
+	struct smb_chg_param	fcc;
+	struct smb_chg_param	ov;
+};
+
+static struct smb_params v1_params = {
+	.fcc		= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.ov		= {
+		.name	= "battery over voltage",
+		.reg	= CHGR_BATTOV_CFG_REG,
+		.min_u	= 2450000,
+		.max_u	= 5000000,
+		.step_u	= 10000,
+	},
+};
+
+struct smb_irq_info {
+	const char		*name;
+	const irq_handler_t	handler;
+	const bool		wake;
+	int			irq;
+};
+
+struct smb1355 {
+	struct device		*dev;
+	char			*name;
+	struct regmap		*regmap;
+
+	struct smb_params	param;
+
+	struct mutex		write_lock;
+
+	struct power_supply	*parallel_psy;
+	struct pmic_revid_data	*pmic_rev_id;
+};
+
+static bool is_secure(struct smb1355 *chip, int addr)
+{
+	/* assume everything above 0xA0 is secure */
+	return (addr & 0xFF) >= 0xA0;
+}
+
+static int smb1355_read(struct smb1355 *chip, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc;
+
+	rc = regmap_read(chip->regmap, addr, &temp);
+	if (rc >= 0)
+		*val = (u8)temp;
+
+	return rc;
+}
+
+static int smb1355_masked_write(struct smb1355 *chip, u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int smb1355_write(struct smb1355 *chip, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->write_lock);
+
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap, (addr & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write(chip->regmap, addr, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int smb1355_set_charge_param(struct smb1355 *chip,
+			struct smb_chg_param *param, int val_u)
+{
+	int rc;
+	u8 val_raw;
+
+	if (val_u > param->max_u || val_u < param->min_u) {
+		pr_err("%s: %d is out of range [%d, %d]\n",
+			param->name, val_u, param->min_u, param->max_u);
+		return -EINVAL;
+	}
+
+	val_raw = (val_u - param->min_u) / param->step_u;
+
+	rc = smb1355_write(chip, param->reg, val_raw);
+	if (rc < 0) {
+		pr_err("%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+			param->name, val_raw, param->reg, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb1355_get_charge_param(struct smb1355 *chip,
+			struct smb_chg_param *param, int *val_u)
+{
+	int rc;
+	u8 val_raw;
+
+	rc = smb1355_read(chip, param->reg, &val_raw);
+	if (rc < 0) {
+		pr_err("%s: Couldn't read from 0x%04x rc=%d\n",
+			param->name, param->reg, rc);
+		return rc;
+	}
+
+	*val_u = val_raw * param->step_u + param->min_u;
+
+	return rc;
+}
+
+static irqreturn_t smb1355_handle_chg_state_change(int irq, void *data)
+{
+	struct smb1355 *chip = data;
+
+	if (chip->parallel_psy)
+		power_supply_changed(chip->parallel_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t smb1355_handle_wdog_bark(int irq, void *data)
+{
+	struct smb1355 *chip = data;
+	int rc;
+
+	rc = smb1355_write(chip, BARK_BITE_WDOG_PET_REG,
+					BARK_BITE_WDOG_PET_BIT);
+	if (rc < 0)
+		pr_err("Couldn't pet the dog rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+/*****************************
+ * PARALLEL PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb1355_parallel_props[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't read SMB1355_BATTERY_STATUS_3 rc=%d\n", rc);
+		return rc;
+	}
+
+	if (stat & ENABLE_CHARGING_BIT) {
+		if (stat & BATT_GT_PRE_TO_FAST_BIT)
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+		else
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	} else {
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	return rc;
+}
+
+static int smb1355_get_parallel_charging(struct smb1355 *chip, int *disabled)
+{
+	int rc;
+	u8 cfg2;
+
+	rc = smb1355_read(chip, CHGR_CFG2_REG, &cfg2);
+	if (rc < 0) {
+		pr_err("Couldn't read en_cmg_reg rc=%d\n", rc);
+		return rc;
+	}
+
+	if (cfg2 & CHG_EN_SRC_BIT)
+		*disabled = 0;
+	else
+		*disabled = 1;
+
+	return 0;
+}
+
+static int smb1355_parallel_get_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb1355 *chip = power_supply_get_drvdata(psy);
+	u8 stat;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smb1355_get_prop_batt_charge_type(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
+		if (rc >= 0)
+			val->intval = (bool)(stat & ENABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_PIN_ENABLED:
+		rc = smb1355_read(chip, BATTERY_STATUS_2_REG, &stat);
+		if (rc >= 0)
+			val->intval = !(stat & DISABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1355_get_parallel_charging(chip, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smb1355_get_charge_param(chip, &chip->param.ov,
+						&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smb1355_get_charge_param(chip, &chip->param.fcc,
+						&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = chip->name;
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		break;
+	default:
+		pr_err_ratelimited("parallel psy get prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb1355_set_parallel_charging(struct smb1355 *chip, bool disable)
+{
+	int rc;
+
+	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+				 disable ? 0 : WDOG_TIMER_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't %s watchdog rc=%d\n",
+		       disable ? "disable" : "enable", rc);
+		disable = true;
+	}
+
+	/*
+	 * Configure charge enable for high polarity.
+	 * When disabling charging, set it to command register control (cmd bit = 0);
+	 * when enabling charging, set it to pin control.
+	 */
+	rc = smb1355_masked_write(chip, CHGR_CFG2_REG,
+			CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT,
+			disable ? 0 : CHG_EN_SRC_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb1355_parallel_set_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     const union power_supply_propval *val)
+{
+	struct smb1355 *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1355_set_parallel_charging(chip, (bool)val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smb1355_set_charge_param(chip, &chip->param.ov,
+						val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smb1355_set_charge_param(chip, &chip->param.fcc,
+						val->intval);
+		break;
+	default:
+		pr_debug("parallel power supply set prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb1355_parallel_prop_is_writeable(struct power_supply *psy,
+					      enum power_supply_property prop)
+{
+	return 0;
+}
+
+static struct power_supply_desc parallel_psy_desc = {
+	.name			= "parallel",
+	.type			= POWER_SUPPLY_TYPE_PARALLEL,
+	.properties		= smb1355_parallel_props,
+	.num_properties		= ARRAY_SIZE(smb1355_parallel_props),
+	.get_property		= smb1355_parallel_get_prop,
+	.set_property		= smb1355_parallel_set_prop,
+	.property_is_writeable	= smb1355_parallel_prop_is_writeable,
+};
+
+static int smb1355_init_parallel_psy(struct smb1355 *chip)
+{
+	struct power_supply_config parallel_cfg = {};
+
+	parallel_cfg.drv_data = chip;
+	parallel_cfg.of_node = chip->dev->of_node;
+
+	/* change to smb1355's property list */
+	parallel_psy_desc.properties = smb1355_parallel_props;
+	parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+						   &parallel_psy_desc,
+						   &parallel_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel power supply\n");
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	return 0;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+
+static int smb1355_init_hw(struct smb1355 *chip)
+{
+	int rc;
+
+	/* enable watchdog bark and bite interrupts, and disable the watchdog */
+	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT
+			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+			| BARK_WDOG_INT_EN_BIT,
+			BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable charging when watchdog bites */
+	rc = smb1355_masked_write(chip, SNARL_BARK_BITE_WD_CFG_REG,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable parallel charging path */
+	rc = smb1355_set_parallel_charging(chip, true);
+	if (rc < 0) {
+		pr_err("Couldn't disable parallel path rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize FCC to 0 */
+	rc = smb1355_set_charge_param(chip, &chip->param.fcc, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set 0 FCC rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable parallel current sensing */
+	rc = smb1355_masked_write(chip, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+static struct smb_irq_info smb1355_irqs[] = {
+	[0] = {
+		.name		= "wdog-bark",
+		.handler	= smb1355_handle_wdog_bark,
+	},
+	[1] = {
+		.name		= "chg-state-change",
+		.handler	= smb1355_handle_chg_state_change,
+		.wake		= true,
+	},
+};
+
+static int smb1355_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb1355_irqs); i++) {
+		if (strcmp(smb1355_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb1355_request_interrupt(struct smb1355 *chip,
+				struct device_node *node,
+				const char *irq_name)
+{
+	int rc = 0, irq, irq_index;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb1355_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb1355_irqs[irq_index].handler)
+		return 0;
+
+	rc = devm_request_threaded_irq(chip->dev, irq, NULL,
+				smb1355_irqs[irq_index].handler,
+				IRQF_ONESHOT, irq_name, chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
+		return rc;
+	}
+
+	if (smb1355_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb1355_request_interrupts(struct smb1355 *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					prop, name) {
+			rc = smb1355_request_interrupt(chip, child, name);
+			if (rc < 0) {
+				pr_err("Couldn't request interrupt %s rc=%d\n",
+					name, rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*********
+ * PROBE *
+ *********/
+static const struct of_device_id match_table[] = {
+	{
+		.compatible	= "qcom,smb1355",
+	},
+	{ },
+};
+
+static int smb1355_probe(struct platform_device *pdev)
+{
+	struct smb1355 *chip;
+	const struct of_device_id *id;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->param = v1_params;
+	chip->name = "smb1355";
+	mutex_init(&chip->write_lock);
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_device(of_match_ptr(match_table), chip->dev);
+	if (!id) {
+		pr_err("Couldn't find a matching device\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb1355_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb1355_init_parallel_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb1355_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	pr_info("%s probed successfully\n", chip->name);
+	return rc;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb1355_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver smb1355_driver = {
+	.driver	= {
+		.name		= "qcom,smb1355-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe	= smb1355_probe,
+	.remove	= smb1355_remove,
+};
+module_platform_driver(smb1355_driver);
+
+MODULE_DESCRIPTION("QPNP SMB1355 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3f218f5..c5ab1b0 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@
  */
 static int storvsc_timeout = 180;
 
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 static struct scsi_transport_template *fc_transport_template;
 #endif
@@ -1283,6 +1281,22 @@
 	return ret;
 }
 
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+	/*
+	 * Set blist flag to permit the reading of the VPD pages even when
+	 * the target may claim SPC-2 compliance. MSFT targets currently
+	 * claim SPC-2 compliance while they implement post SPC-2 features.
+	 * With this flag we can correctly handle WRITE_SAME_16 issues.
+	 *
+	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+	 * still supports REPORT LUN.
+	 */
+	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+	return 0;
+}
+
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
@@ -1298,14 +1312,6 @@
 	sdevice->no_write_same = 1;
 
 	/*
-	 * Add blist flags to permit the reading of the VPD pages even when
-	 * the target may claim SPC-2 compliance. MSFT targets currently
-	 * claim SPC-2 compliance while they implement post SPC-2 features.
-	 * With this patch we can correctly handle WRITE_SAME_16 issues.
-	 */
-	sdevice->sdev_bflags |= msft_blist_flags;
-
-	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
 	 * if the device is a MSFT virtual device.  If the host is
 	 * WIN10 or newer, allow write_same.
@@ -1569,6 +1575,7 @@
 	.eh_host_reset_handler =	storvsc_host_reset_handler,
 	.proc_name =		"storvsc_host",
 	.eh_timed_out =		storvsc_eh_timed_out,
+	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
 	.cmd_per_lun =		255,
 	.this_id =		-1,
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 8c5b0d0..b2cf03c 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -1,7 +1,7 @@
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o
+obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o
+obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o
 obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
 obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
 obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
deleted file mode 100644
index f3b1b83..0000000
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ /dev/null
@@ -1,837 +0,0 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/qdsp6v2/apr_tal.h>
-#include <linux/qdsp6v2/apr.h>
-#include <sound/voice_svc.h>
-
-#define MINOR_NUMBER 1
-#define APR_MAX_RESPONSE 10
-#define TIMEOUT_MS 1000
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-
-struct voice_svc_device {
-	struct cdev *cdev;
-	struct device *dev;
-	int major;
-};
-
-struct voice_svc_prvt {
-	void *apr_q6_mvm;
-	void *apr_q6_cvs;
-	uint16_t response_count;
-	struct list_head response_queue;
-	wait_queue_head_t response_wait;
-	spinlock_t response_lock;
-	/*
-	 * This mutex ensures responses are processed in sequential order and
-	 * that no two threads access and free the same response at the same
-	 * time.
-	 */
-	struct mutex response_mutex_lock;
-};
-
-struct apr_data {
-	struct apr_hdr hdr;
-	__u8 payload[0];
-} __packed;
-
-struct apr_response_list {
-	struct list_head list;
-	struct voice_svc_cmd_response resp;
-};
-
-static struct voice_svc_device *voice_svc_dev;
-static struct class *voice_svc_class;
-static bool reg_dummy_sess;
-static void *dummy_q6_mvm;
-static void *dummy_q6_cvs;
-dev_t device_num;
-
-static int voice_svc_dummy_reg(void);
-static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data,
-					void *priv);
-
-static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
-{
-	struct voice_svc_prvt *prtd;
-	struct apr_response_list *response_list;
-	unsigned long spin_flags;
-
-	if ((data == NULL) || (priv == NULL)) {
-		pr_err("%s: data or priv is NULL\n", __func__);
-
-		return -EINVAL;
-	}
-
-	prtd = (struct voice_svc_prvt *)priv;
-	if (prtd == NULL) {
-		pr_err("%s: private data is NULL\n", __func__);
-
-		return -EINVAL;
-	}
-
-	pr_debug("%s: data->opcode %x\n", __func__,
-		 data->opcode);
-
-	if (data->opcode == RESET_EVENTS) {
-		if (data->reset_proc == APR_DEST_QDSP6) {
-			pr_debug("%s: Received ADSP reset event\n", __func__);
-
-			if (prtd->apr_q6_mvm != NULL) {
-				apr_reset(prtd->apr_q6_mvm);
-				prtd->apr_q6_mvm = NULL;
-			}
-
-			if (prtd->apr_q6_cvs != NULL) {
-				apr_reset(prtd->apr_q6_cvs);
-				prtd->apr_q6_cvs = NULL;
-			}
-		} else if (data->reset_proc == APR_DEST_MODEM) {
-			pr_debug("%s: Received Modem reset event\n", __func__);
-		}
-		/* Set the remaining member variables to default values
-		 * for RESET_EVENTS
-		 */
-		data->payload_size = 0;
-		data->payload = NULL;
-		data->src_port = 0;
-		data->dest_port = 0;
-		data->token = 0;
-	}
-
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	if (prtd->response_count < APR_MAX_RESPONSE) {
-		response_list = kmalloc(sizeof(struct apr_response_list) +
-					data->payload_size, GFP_ATOMIC);
-		if (response_list == NULL) {
-			spin_unlock_irqrestore(&prtd->response_lock,
-					       spin_flags);
-			return -ENOMEM;
-		}
-
-		response_list->resp.src_port = data->src_port;
-
-		/* Reverting the bit manipulation done in voice_svc_update_hdr
-		 * to the src_port which is returned to us as dest_port.
-		 */
-		response_list->resp.dest_port = ((data->dest_port) >> 8);
-		response_list->resp.token = data->token;
-		response_list->resp.opcode = data->opcode;
-		response_list->resp.payload_size = data->payload_size;
-		if (data->payload != NULL && data->payload_size > 0) {
-			memcpy(response_list->resp.payload, data->payload,
-			       data->payload_size);
-		}
-
-		list_add_tail(&response_list->list, &prtd->response_queue);
-		prtd->response_count++;
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-
-		wake_up(&prtd->response_wait);
-	} else {
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-		pr_err("%s: Response dropped since the queue is full\n",
-		       __func__);
-	}
-
-	return 0;
-}
-
-static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv)
-{
-	/* Do Nothing */
-	return 0;
-}
-
-static void voice_svc_update_hdr(struct voice_svc_cmd_request *apr_req_data,
-				 struct apr_data *aprdata)
-{
-
-	aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
-				       APR_HDR_LEN(sizeof(struct apr_hdr)),
-				       APR_PKT_VER);
-	/* Bit manipulation is done on src_port so that a unique ID is sent.
-	 * This manipulation can be used in the future where the same service
-	 * is tried to open multiple times with the same src_port. At that
-	 * time 0x0001 can be replaced with other values depending on the
-	 * count.
-	 */
-	aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001);
-	aprdata->hdr.dest_port = apr_req_data->dest_port;
-	aprdata->hdr.token = apr_req_data->token;
-	aprdata->hdr.opcode = apr_req_data->opcode;
-	aprdata->hdr.pkt_size  = APR_PKT_SIZE(APR_HDR_SIZE,
-					apr_req_data->payload_size);
-	memcpy(aprdata->payload, apr_req_data->payload,
-	       apr_req_data->payload_size);
-}
-
-static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
-			      struct voice_svc_prvt *prtd)
-{
-	int ret = 0;
-	void *apr_handle = NULL;
-	struct apr_data *aprdata = NULL;
-	uint32_t user_payload_size;
-	uint32_t payload_size;
-
-	pr_debug("%s\n", __func__);
-
-	if (apr_request == NULL) {
-		pr_err("%s: apr_request is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	user_payload_size = apr_request->payload_size;
-	payload_size = sizeof(struct apr_data) + user_payload_size;
-
-	if (payload_size <= user_payload_size) {
-		pr_err("%s: invalid payload size ( 0x%x ).\n",
-			__func__, user_payload_size);
-		ret = -EINVAL;
-		goto done;
-	} else {
-		aprdata = kmalloc(payload_size, GFP_KERNEL);
-		if (aprdata == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-	}
-
-	voice_svc_update_hdr(apr_request, aprdata);
-
-	if (!strcmp(apr_request->svc_name, VOICE_SVC_CVS_STR)) {
-		apr_handle = prtd->apr_q6_cvs;
-	} else if (!strcmp(apr_request->svc_name, VOICE_SVC_MVM_STR)) {
-		apr_handle = prtd->apr_q6_mvm;
-	} else {
-		pr_err("%s: Invalid service %.*s\n", __func__,
-			MAX_APR_SERVICE_NAME_LEN, apr_request->svc_name);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata);
-
-	if (ret < 0) {
-		pr_err("%s: Fail in sending request %d\n",
-			__func__, ret);
-		ret = -EINVAL;
-	} else {
-		pr_debug("%s: apr packet sent successfully %d\n",
-			 __func__, ret);
-		ret = 0;
-	}
-
-done:
-	kfree(aprdata);
-	return ret;
-}
-static int voice_svc_reg(char *svc, uint32_t src_port,
-			 struct voice_svc_prvt *prtd, void **handle)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	if (handle == NULL) {
-		pr_err("%s: handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (*handle != NULL) {
-		pr_err("%s: svc handle not NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (src_port == (APR_MAX_PORTS - 1)) {
-		pr_err("%s: SRC port reserved for dummy session\n", __func__);
-		pr_err("%s: Unable to register %s\n", __func__, svc);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	*handle = apr_register("ADSP",
-			       svc, qdsp_apr_callback,
-			       ((src_port) << 8 | 0x0001),
-			       prtd);
-
-	if (*handle == NULL) {
-		pr_err("%s: Unable to register %s\n",
-		       __func__, svc);
-
-		ret = -EFAULT;
-		goto done;
-	}
-	pr_debug("%s: Register %s successful\n",
-		__func__, svc);
-done:
-	return ret;
-}
-
-static int voice_svc_dereg(char *svc, void **handle)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	if (handle == NULL) {
-		pr_err("%s: handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (*handle == NULL) {
-		pr_err("%s: svc handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = apr_deregister(*handle);
-	if (ret) {
-		pr_err("%s: Unable to deregister service %s; error: %d\n",
-		       __func__, svc, ret);
-
-		goto done;
-	}
-	*handle = NULL;
-	pr_debug("%s: deregister %s successful\n", __func__, svc);
-
-done:
-	return ret;
-}
-
-static int process_reg_cmd(struct voice_svc_register *apr_reg_svc,
-			   struct voice_svc_prvt *prtd)
-{
-	int ret = 0;
-	char *svc = NULL;
-	void **handle = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_MVM_STR)) {
-		svc = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_mvm;
-	} else if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_CVS_STR)) {
-		svc = VOICE_SVC_CVS_STR;
-		handle = &prtd->apr_q6_cvs;
-	} else {
-		pr_err("%s: Invalid Service: %.*s\n", __func__,
-			MAX_APR_SERVICE_NAME_LEN, apr_reg_svc->svc_name);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (apr_reg_svc->reg_flag) {
-		ret = voice_svc_reg(svc, apr_reg_svc->src_port, prtd,
-				    handle);
-	} else if (!apr_reg_svc->reg_flag) {
-		ret = voice_svc_dereg(svc, handle);
-	}
-
-done:
-	return ret;
-}
-
-static ssize_t voice_svc_write(struct file *file, const char __user *buf,
-			       size_t count, loff_t *ppos)
-{
-	int ret = 0;
-	struct voice_svc_prvt *prtd;
-	struct voice_svc_write_msg *data = NULL;
-	uint32_t cmd;
-	struct voice_svc_register *register_data = NULL;
-	struct voice_svc_cmd_request *request_data = NULL;
-	uint32_t request_payload_size;
-
-	pr_debug("%s\n", __func__);
-
-	/*
-	 * Check if enough memory is allocated to parse the message type.
-	 * Will check there is enough to hold the payload later.
-	 */
-	if (count >= sizeof(struct voice_svc_write_msg)) {
-		data = kmalloc(count, GFP_KERNEL);
-	} else {
-		pr_debug("%s: invalid data size\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (data == NULL) {
-		pr_err("%s: data kmalloc failed.\n", __func__);
-
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	ret = copy_from_user(data, buf, count);
-	if (ret) {
-		pr_err("%s: copy_from_user failed %d\n", __func__, ret);
-
-		ret = -EPERM;
-		goto done;
-	}
-
-	cmd = data->msg_type;
-	prtd = (struct voice_svc_prvt *) file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	switch (cmd) {
-	case MSG_REGISTER:
-		/*
-		 * Check that count reflects the expected size to ensure
-		 * sufficient memory was allocated. Since voice_svc_register
-		 * has a static size, this should be exact.
-		 */
-		if (count == (sizeof(struct voice_svc_write_msg) +
-			      sizeof(struct voice_svc_register))) {
-			register_data =
-				(struct voice_svc_register *)data->payload;
-			if (register_data == NULL) {
-				pr_err("%s: register data is NULL", __func__);
-				ret = -EINVAL;
-				goto done;
-			}
-			ret = process_reg_cmd(register_data, prtd);
-			if (!ret)
-				ret = count;
-		} else {
-			pr_err("%s: invalid data payload size for register command\n",
-				__func__);
-			ret = -EINVAL;
-			goto done;
-		}
-		break;
-	case MSG_REQUEST:
-		/*
-		 * Check that count reflects the expected size to ensure
-		 * sufficient memory was allocated. Since voice_svc_cmd_request
-		 * has a variable size, check the minimum value count must be to
-		 * parse the message request then check the minimum size to hold
-		 * the payload of the message request.
-		 */
-		if (count >= (sizeof(struct voice_svc_write_msg) +
-			      sizeof(struct voice_svc_cmd_request))) {
-			request_data =
-				(struct voice_svc_cmd_request *)data->payload;
-			if (request_data == NULL) {
-				pr_err("%s: request data is NULL", __func__);
-				ret = -EINVAL;
-				goto done;
-			}
-
-			request_payload_size = request_data->payload_size;
-
-			if (count >= (sizeof(struct voice_svc_write_msg) +
-				      sizeof(struct voice_svc_cmd_request) +
-				      request_payload_size)) {
-				ret = voice_svc_send_req(request_data, prtd);
-				if (!ret)
-					ret = count;
-			} else {
-				pr_err("%s: invalid request payload size\n",
-					__func__);
-				ret = -EINVAL;
-				goto done;
-			}
-		} else {
-			pr_err("%s: invalid data payload size for request command\n",
-				__func__);
-			ret = -EINVAL;
-			goto done;
-		}
-		break;
-	default:
-		pr_debug("%s: Invalid command: %u\n", __func__, cmd);
-		ret = -EINVAL;
-	}
-
-done:
-	kfree(data);
-	return ret;
-}
-
-static ssize_t voice_svc_read(struct file *file, char __user *arg,
-			      size_t count, loff_t *ppos)
-{
-	int ret = 0;
-	struct voice_svc_prvt *prtd;
-	struct apr_response_list *resp;
-	unsigned long spin_flags;
-	int size;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = (struct voice_svc_prvt *)file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	mutex_lock(&prtd->response_mutex_lock);
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	if (list_empty(&prtd->response_queue)) {
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-		pr_debug("%s: wait for a response\n", __func__);
-
-		ret = wait_event_interruptible_timeout(prtd->response_wait,
-					!list_empty(&prtd->response_queue),
-					msecs_to_jiffies(TIMEOUT_MS));
-		if (ret == 0) {
-			pr_debug("%s: Read timeout\n", __func__);
-
-			ret = -ETIMEDOUT;
-			goto unlock;
-		} else if (ret > 0 && !list_empty(&prtd->response_queue)) {
-			pr_debug("%s: Interrupt received for response\n",
-				 __func__);
-		} else if (ret < 0) {
-			pr_debug("%s: Interrupted by SIGNAL %d\n",
-				 __func__, ret);
-
-			goto unlock;
-		}
-
-		spin_lock_irqsave(&prtd->response_lock, spin_flags);
-	}
-
-	resp = list_first_entry(&prtd->response_queue,
-				struct apr_response_list, list);
-
-	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-
-	size = resp->resp.payload_size +
-	       sizeof(struct voice_svc_cmd_response);
-
-	if (count < size) {
-		pr_err("%s: Invalid payload size %zd, %d\n",
-		       __func__, count, size);
-
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	if (!access_ok(VERIFY_WRITE, arg, size)) {
-		pr_err("%s: Access denied to write\n",
-		       __func__);
-
-		ret = -EPERM;
-		goto unlock;
-	}
-
-	ret = copy_to_user(arg, &resp->resp,
-			 sizeof(struct voice_svc_cmd_response) +
-			 resp->resp.payload_size);
-	if (ret) {
-		pr_err("%s: copy_to_user failed %d\n", __func__, ret);
-
-		ret = -EPERM;
-		goto unlock;
-	}
-
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	list_del(&resp->list);
-	prtd->response_count--;
-	kfree(resp);
-
-	spin_unlock_irqrestore(&prtd->response_lock,
-				spin_flags);
-
-	ret = count;
-
-unlock:
-	mutex_unlock(&prtd->response_mutex_lock);
-done:
-	return ret;
-}
-
-static int voice_svc_dummy_reg(void)
-{
-	uint32_t src_port = APR_MAX_PORTS - 1;
-
-	pr_debug("%s\n", __func__);
-	dummy_q6_mvm = apr_register("ADSP", "MVM",
-				qdsp_dummy_apr_callback,
-				src_port,
-				NULL);
-	if (dummy_q6_mvm == NULL) {
-		pr_err("%s: Unable to register dummy MVM\n", __func__);
-		goto err;
-	}
-
-	dummy_q6_cvs = apr_register("ADSP", "CVS",
-				qdsp_dummy_apr_callback,
-				src_port,
-				NULL);
-	if (dummy_q6_cvs == NULL) {
-		pr_err("%s: Unable to register dummy CVS\n", __func__);
-		goto err;
-	}
-	return 0;
-err:
-	if (dummy_q6_mvm != NULL) {
-		apr_deregister(dummy_q6_mvm);
-		dummy_q6_mvm = NULL;
-	}
-	return -EINVAL;
-}
-
-static int voice_svc_open(struct inode *inode, struct file *file)
-{
-	struct voice_svc_prvt *prtd = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = kmalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
-
-	if (prtd == NULL)
-		return -ENOMEM;
-
-	memset(prtd, 0, sizeof(struct voice_svc_prvt));
-	prtd->apr_q6_cvs = NULL;
-	prtd->apr_q6_mvm = NULL;
-	prtd->response_count = 0;
-	INIT_LIST_HEAD(&prtd->response_queue);
-	init_waitqueue_head(&prtd->response_wait);
-	spin_lock_init(&prtd->response_lock);
-	mutex_init(&prtd->response_mutex_lock);
-	file->private_data = (void *)prtd;
-
-	/* Current APR implementation doesn't support session based
-	 * multiple service registrations. The apr_deregister()
-	 * function sets the destination and client IDs to zero, if
-	 * deregister is called for a single service instance.
-	 * To avoid this, register for additional services.
-	 */
-	if (!reg_dummy_sess) {
-		voice_svc_dummy_reg();
-		reg_dummy_sess = 1;
-	}
-	return 0;
-}
-
-static int voice_svc_release(struct inode *inode, struct file *file)
-{
-	int ret = 0;
-	struct apr_response_list *resp = NULL;
-	unsigned long spin_flags;
-	struct voice_svc_prvt *prtd = NULL;
-	char *svc_name = NULL;
-	void **handle = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = (struct voice_svc_prvt *)file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (prtd->apr_q6_cvs != NULL) {
-		svc_name = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_cvs;
-		ret = voice_svc_dereg(svc_name, handle);
-		if (ret)
-			pr_err("%s: Failed to dereg CVS %d\n", __func__, ret);
-	}
-
-	if (prtd->apr_q6_mvm != NULL) {
-		svc_name = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_mvm;
-		ret = voice_svc_dereg(svc_name, handle);
-		if (ret)
-			pr_err("%s: Failed to dereg MVM %d\n", __func__, ret);
-	}
-
-	mutex_lock(&prtd->response_mutex_lock);
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	while (!list_empty(&prtd->response_queue)) {
-		pr_debug("%s: Remove item from response queue\n", __func__);
-
-		resp = list_first_entry(&prtd->response_queue,
-					struct apr_response_list, list);
-		list_del(&resp->list);
-		prtd->response_count--;
-		kfree(resp);
-	}
-
-	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-	mutex_unlock(&prtd->response_mutex_lock);
-
-	mutex_destroy(&prtd->response_mutex_lock);
-
-	kfree(file->private_data);
-	file->private_data = NULL;
-
-done:
-	return ret;
-}
-
-static const struct file_operations voice_svc_fops = {
-	.owner =                THIS_MODULE,
-	.open =                 voice_svc_open,
-	.read =                 voice_svc_read,
-	.write =                voice_svc_write,
-	.release =              voice_svc_release,
-};
-
-
-static int voice_svc_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	voice_svc_dev = devm_kzalloc(&pdev->dev,
-				  sizeof(struct voice_svc_device), GFP_KERNEL);
-	if (!voice_svc_dev) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER,
-				  VOICE_SVC_DRIVER_NAME);
-	if (ret) {
-		pr_err("%s: Failed to alloc chrdev\n", __func__);
-		ret = -ENODEV;
-		goto chrdev_err;
-	}
-
-	voice_svc_dev->major = MAJOR(device_num);
-	voice_svc_class = class_create(THIS_MODULE, VOICE_SVC_DRIVER_NAME);
-	if (IS_ERR(voice_svc_class)) {
-		ret = PTR_ERR(voice_svc_class);
-		pr_err("%s: Failed to create class; err = %d\n", __func__,
-			ret);
-		goto class_err;
-	}
-
-	voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
-					   NULL, VOICE_SVC_DRIVER_NAME);
-	if (IS_ERR(voice_svc_dev->dev)) {
-		ret = PTR_ERR(voice_svc_dev->dev);
-		pr_err("%s: Failed to create device; err = %d\n", __func__,
-			ret);
-		goto dev_err;
-	}
-
-	voice_svc_dev->cdev = cdev_alloc();
-	if (!voice_svc_dev->cdev) {
-		pr_err("%s: Failed to alloc cdev\n", __func__);
-		ret = -ENOMEM;
-		goto cdev_alloc_err;
-	}
-
-	cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
-	ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
-	if (ret) {
-		pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
-			ret);
-		goto add_err;
-	}
-	pr_debug("%s: Device created\n", __func__);
-	goto done;
-
-add_err:
-	cdev_del(voice_svc_dev->cdev);
-cdev_alloc_err:
-	device_destroy(voice_svc_class, device_num);
-dev_err:
-	class_destroy(voice_svc_class);
-class_err:
-	unregister_chrdev_region(0, MINOR_NUMBER);
-chrdev_err:
-	kfree(voice_svc_dev);
-done:
-	return ret;
-}
-
-static int voice_svc_remove(struct platform_device *pdev)
-{
-	pr_debug("%s\n", __func__);
-
-	cdev_del(voice_svc_dev->cdev);
-	kfree(voice_svc_dev->cdev);
-	device_destroy(voice_svc_class, device_num);
-	class_destroy(voice_svc_class);
-	unregister_chrdev_region(0, MINOR_NUMBER);
-	kfree(voice_svc_dev);
-
-	return 0;
-}
-
-static const struct of_device_id voice_svc_of_match[] = {
-	{.compatible = "qcom,msm-voice-svc"},
-	{ }
-};
-MODULE_DEVICE_TABLE(of, voice_svc_of_match);
-
-static struct platform_driver voice_svc_driver = {
-	.probe          = voice_svc_probe,
-	.remove         = voice_svc_remove,
-	.driver         = {
-		.name   = "msm-voice-svc",
-		.owner  = THIS_MODULE,
-		.of_match_table = voice_svc_of_match,
-	},
-};
-
-static int __init voice_svc_init(void)
-{
-	pr_debug("%s\n", __func__);
-
-	return platform_driver_register(&voice_svc_driver);
-}
-
-static void __exit voice_svc_exit(void)
-{
-	pr_debug("%s\n", __func__);
-
-	platform_driver_unregister(&voice_svc_driver);
-}
-
-module_init(voice_svc_init);
-module_exit(voice_svc_exit);
-
-MODULE_DESCRIPTION("Soc QDSP6v2 Voice Service driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 37125c0..7da9211 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -909,13 +909,11 @@
 	struct cpufreq_cooling_device *cpufreq_dev;
 	char dev_name[THERMAL_NAME_LENGTH];
 	struct cpufreq_frequency_table *pos, *table;
-	struct cpumask temp_mask;
 	unsigned int freq, i, num_cpus;
 	int ret;
 	struct thermal_cooling_device_ops *cooling_ops;
 
-	cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
-	policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+	policy = cpufreq_cpu_get(cpumask_first(clip_cpus));
 	if (!policy) {
 		pr_debug("%s: CPUFreq policy not found\n", __func__);
 		return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 8d706cd..342160e 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -34,6 +34,7 @@
 #include <linux/qpnp/qpnp-adc.h>
 #include <linux/thermal.h>
 #include <linux/platform_device.h>
+#include "thermal_core.h"
 
 /* QPNP VADC TM register definition */
 #define QPNP_REVISION3					0x2
@@ -41,126 +42,15 @@
 #define QPNP_PERPH_TYPE2				0x2
 #define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT		2
 #define QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT		0x22
-#define QPNP_STATUS1					0x8
-#define QPNP_STATUS1_OP_MODE				4
-#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
-#define QPNP_STATUS1_REQ_STS				BIT(1)
-#define QPNP_STATUS1_EOC				BIT(0)
-#define QPNP_STATUS2					0x9
-#define QPNP_STATUS2_CONV_SEQ_STATE			6
-#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
-#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
-#define QPNP_CONV_TIMEOUT_ERR				2
-
-#define QPNP_MODE_CTL					0x40
-#define QPNP_OP_MODE_SHIFT				3
-#define QPNP_VREF_XO_THM_FORCE				BIT(2)
-#define QPNP_AMUX_TRIM_EN				BIT(1)
-#define QPNP_ADC_TRIM_EN				BIT(0)
 #define QPNP_EN_CTL1					0x46
 #define QPNP_ADC_TM_EN					BIT(7)
 #define QPNP_BTM_CONV_REQ				0x47
 #define QPNP_ADC_CONV_REQ_EN				BIT(7)
 
-#define QPNP_ADC_CH_SEL_CTL				0x48
-#define QPNP_ADC_DIG_PARAM				0x50
-#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		3
-#define QPNP_HW_SETTLE_DELAY				0x51
+#define QPNP_OP_MODE_SHIFT				3
 #define QPNP_CONV_REQ					0x52
 #define QPNP_CONV_REQ_SET				BIT(7)
-#define QPNP_CONV_SEQ_CTL				0x54
-#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
-#define QPNP_CONV_SEQ_TRIG_CTL				0x55
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL			0x57
-#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2			0x58
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
 
-#define QPNP_ADC_MEAS_INTERVAL_OP_CTL			0x59
-#define QPNP_ADC_MEAS_INTERVAL_OP			BIT(7)
-
-#define QPNP_FAST_AVG_CTL				0x5a
-#define QPNP_FAST_AVG_EN				0x5b
-#define QPNP_FAST_AVG_ENABLED				BIT(7)
-
-#define QPNP_M0_LOW_THR_LSB				0x5c
-#define QPNP_M0_LOW_THR_MSB				0x5d
-#define QPNP_M0_HIGH_THR_LSB				0x5e
-#define QPNP_M0_HIGH_THR_MSB				0x5f
-#define QPNP_M1_ADC_CH_SEL_CTL				0x68
-#define QPNP_M1_LOW_THR_LSB				0x69
-#define QPNP_M1_LOW_THR_MSB				0x6a
-#define QPNP_M1_HIGH_THR_LSB				0x6b
-#define QPNP_M1_HIGH_THR_MSB				0x6c
-#define QPNP_M2_ADC_CH_SEL_CTL				0x70
-#define QPNP_M2_LOW_THR_LSB				0x71
-#define QPNP_M2_LOW_THR_MSB				0x72
-#define QPNP_M2_HIGH_THR_LSB				0x73
-#define QPNP_M2_HIGH_THR_MSB				0x74
-#define QPNP_M3_ADC_CH_SEL_CTL				0x78
-#define QPNP_M3_LOW_THR_LSB				0x79
-#define QPNP_M3_LOW_THR_MSB				0x7a
-#define QPNP_M3_HIGH_THR_LSB				0x7b
-#define QPNP_M3_HIGH_THR_MSB				0x7c
-#define QPNP_M4_ADC_CH_SEL_CTL				0x80
-#define QPNP_M4_LOW_THR_LSB				0x81
-#define QPNP_M4_LOW_THR_MSB				0x82
-#define QPNP_M4_HIGH_THR_LSB				0x83
-#define QPNP_M4_HIGH_THR_MSB				0x84
-#define QPNP_M5_ADC_CH_SEL_CTL				0x88
-#define QPNP_M5_LOW_THR_LSB				0x89
-#define QPNP_M5_LOW_THR_MSB				0x8a
-#define QPNP_M5_HIGH_THR_LSB				0x8b
-#define QPNP_M5_HIGH_THR_MSB				0x8c
-#define QPNP_M6_ADC_CH_SEL_CTL				0x90
-#define QPNP_M6_LOW_THR_LSB				0x91
-#define QPNP_M6_LOW_THR_MSB				0x92
-#define QPNP_M6_HIGH_THR_LSB				0x93
-#define QPNP_M6_HIGH_THR_MSB				0x94
-#define QPNP_M7_ADC_CH_SEL_CTL				0x98
-#define QPNP_M7_LOW_THR_LSB				0x99
-#define QPNP_M7_LOW_THR_MSB				0x9a
-#define QPNP_M7_HIGH_THR_LSB				0x9b
-#define QPNP_M7_HIGH_THR_MSB				0x9c
-
-#define QPNP_ADC_TM_MULTI_MEAS_EN			0x41
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M0			BIT(0)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M1			BIT(1)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M2			BIT(2)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M3			BIT(3)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M4			BIT(4)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M5			BIT(5)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M6			BIT(6)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M7			BIT(7)
-#define QPNP_ADC_TM_LOW_THR_INT_EN			0x42
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M0			BIT(0)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M1			BIT(1)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M2			BIT(2)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M3			BIT(3)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M4			BIT(4)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M5			BIT(5)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M6			BIT(6)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M7			BIT(7)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN			0x43
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M0			BIT(0)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M1			BIT(1)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M2			BIT(2)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M3			BIT(3)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M4			BIT(4)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M5			BIT(5)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M6			BIT(6)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M7			BIT(7)
-
-#define QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL			0x59
-#define QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL			0x6d
-#define QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL			0x75
-#define QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL			0x7d
-#define QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL			0x85
-#define QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL			0x8d
-#define QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL			0x95
-#define QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL			0x9d
 #define QPNP_ADC_TM_STATUS1				0x8
 #define QPNP_ADC_TM_STATUS_LOW				0xa
 #define QPNP_ADC_TM_STATUS_HIGH				0xb
@@ -172,22 +62,22 @@
 #define QPNP_ADC_TM_THR_LSB_MASK(val)			(val & 0xff)
 #define QPNP_ADC_TM_THR_MSB_MASK(val)			((val & 0xff00) >> 8)
 
-#define QPNP_MIN_TIME			2000
-#define QPNP_MAX_TIME			2100
-#define QPNP_RETRY			1000
-
 /* QPNP ADC TM HC start */
-#define QPNP_BTM_HC_STATUS1		0x08
-#define QPNP_BTM_HC_STATUS_LOW		0x0a
-#define QPNP_BTM_HC_STATUS_HIGH		0x0b
+#define QPNP_BTM_HC_STATUS1				0x08
+#define QPNP_BTM_HC_STATUS_LOW				0x0a
+#define QPNP_BTM_HC_STATUS_HIGH				0x0b
 
-#define QPNP_BTM_HC_ADC_DIG_PARAM	0x42
-#define QPNP_BTM_HC_FAST_AVG_CTL	0x43
-#define QPNP_BTM_EN_CTL1		0x46
-#define QPNP_BTM_CONV_REQ		0x47
+#define QPNP_BTM_HC_ADC_DIG_PARAM			0x42
+#define QPNP_BTM_HC_FAST_AVG_CTL			0x43
+#define QPNP_BTM_EN_CTL1				0x46
+#define QPNP_BTM_CONV_REQ				0x47
 
-#define QPNP_BTM_MEAS_INTERVAL_CTL	0x50
-#define QPNP_BTM_MEAS_INTERVAL_CTL2	0x51
+#define QPNP_BTM_MEAS_INTERVAL_CTL			0x50
+#define QPNP_BTM_MEAS_INTERVAL_CTL2			0x51
+#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
 
 #define QPNP_BTM_Mn_ADC_CH_SEL_CTL(n)		((n * 8) + 0x60)
 #define QPNP_BTM_Mn_LOW_THR0(n)			((n * 8) + 0x61)
@@ -208,6 +98,7 @@
 
 #define QPNP_BTM_Mn_DATA0(n)			((n * 2) + 0xa0)
 #define QPNP_BTM_Mn_DATA1(n)			((n * 2) + 0xa1)
+#define QPNP_BTM_CHANNELS			8
 
 /* QPNP ADC TM HC end */
 
@@ -277,69 +168,6 @@
 
 LIST_HEAD(qpnp_adc_tm_device_list);
 
-struct qpnp_adc_tm_trip_reg_type {
-	enum qpnp_adc_tm_channel_select	btm_amux_chan;
-	uint16_t			low_thr_lsb_addr;
-	uint16_t			low_thr_msb_addr;
-	uint16_t			high_thr_lsb_addr;
-	uint16_t			high_thr_msb_addr;
-	u8				multi_meas_en;
-	u8				low_thr_int_chan_en;
-	u8				high_thr_int_chan_en;
-	u8				meas_interval_ctl;
-};
-
-static struct qpnp_adc_tm_trip_reg_type adc_tm_data[] = {
-	[QPNP_ADC_TM_CHAN0] = {QPNP_ADC_TM_M0_ADC_CH_SEL_CTL,
-		QPNP_M0_LOW_THR_LSB,
-		QPNP_M0_LOW_THR_MSB, QPNP_M0_HIGH_THR_LSB,
-		QPNP_M0_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M0,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M0, QPNP_ADC_TM_HIGH_THR_INT_EN_M0,
-		QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN1] = {QPNP_ADC_TM_M1_ADC_CH_SEL_CTL,
-		QPNP_M1_LOW_THR_LSB,
-		QPNP_M1_LOW_THR_MSB, QPNP_M1_HIGH_THR_LSB,
-		QPNP_M1_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M1,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M1, QPNP_ADC_TM_HIGH_THR_INT_EN_M1,
-		QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN2] = {QPNP_ADC_TM_M2_ADC_CH_SEL_CTL,
-		QPNP_M2_LOW_THR_LSB,
-		QPNP_M2_LOW_THR_MSB, QPNP_M2_HIGH_THR_LSB,
-		QPNP_M2_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M2,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M2, QPNP_ADC_TM_HIGH_THR_INT_EN_M2,
-		QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN3] = {QPNP_ADC_TM_M3_ADC_CH_SEL_CTL,
-		QPNP_M3_LOW_THR_LSB,
-		QPNP_M3_LOW_THR_MSB, QPNP_M3_HIGH_THR_LSB,
-		QPNP_M3_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M3,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M3, QPNP_ADC_TM_HIGH_THR_INT_EN_M3,
-		QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN4] = {QPNP_ADC_TM_M4_ADC_CH_SEL_CTL,
-		QPNP_M4_LOW_THR_LSB,
-		QPNP_M4_LOW_THR_MSB, QPNP_M4_HIGH_THR_LSB,
-		QPNP_M4_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M4,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M4, QPNP_ADC_TM_HIGH_THR_INT_EN_M4,
-		QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN5] = {QPNP_ADC_TM_M5_ADC_CH_SEL_CTL,
-		QPNP_M5_LOW_THR_LSB,
-		QPNP_M5_LOW_THR_MSB, QPNP_M5_HIGH_THR_LSB,
-		QPNP_M5_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M5,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M5, QPNP_ADC_TM_HIGH_THR_INT_EN_M5,
-		QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN6] = {QPNP_ADC_TM_M6_ADC_CH_SEL_CTL,
-		QPNP_M6_LOW_THR_LSB,
-		QPNP_M6_LOW_THR_MSB, QPNP_M6_HIGH_THR_LSB,
-		QPNP_M6_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M6,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M6, QPNP_ADC_TM_HIGH_THR_INT_EN_M6,
-		QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN7] = {QPNP_ADC_TM_M7_ADC_CH_SEL_CTL,
-		QPNP_M7_LOW_THR_LSB,
-		QPNP_M7_LOW_THR_MSB, QPNP_M7_HIGH_THR_LSB,
-		QPNP_M7_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M7,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M7, QPNP_ADC_TM_HIGH_THR_INT_EN_M7,
-		QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL},
-};
-
 static struct qpnp_adc_tm_reverse_scale_fn adc_tm_rscale_fn[] = {
 	[SCALE_R_VBATT] = {qpnp_adc_vbatt_rscaler},
 	[SCALE_RBATT_THERM] = {qpnp_adc_btm_scaler},
@@ -380,33 +208,6 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_fast_avg_en(struct qpnp_adc_tm_chip *chip,
-				uint32_t *fast_avg_sample)
-{
-	int rc = 0, version = 0;
-	u8 fast_avg_en = 0;
-
-	version = qpnp_adc_get_revid_version(chip->dev);
-	if (!((version == QPNP_REV_ID_8916_1_0) ||
-		(version == QPNP_REV_ID_8916_1_1) ||
-		(version == QPNP_REV_ID_8916_2_0))) {
-		pr_debug("fast-avg-en not required for this version\n");
-		return rc;
-	}
-
-	fast_avg_en = QPNP_FAST_AVG_ENABLED;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_EN, fast_avg_en, 1);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg enable err\n");
-		return rc;
-	}
-
-	if (*fast_avg_sample >= 3)
-		*fast_avg_sample = 2;
-
-	return rc;
-}
-
 static int qpnp_adc_tm_check_vreg_vote(struct qpnp_adc_tm_chip *chip)
 {
 	int rc = 0;
@@ -443,13 +244,11 @@
 		return rc;
 	}
 
-	if (chip->adc_tm_hc) {
-		data = QPNP_ADC_CONV_REQ_EN;
-		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-		if (rc < 0) {
-			pr_err("adc-tm enable failed\n");
-			return rc;
-		}
+	data = QPNP_ADC_CONV_REQ_EN;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
 	}
 
 	return rc;
@@ -460,12 +259,10 @@
 	u8 data = 0;
 	int rc = 0;
 
-	if (chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-		if (rc < 0) {
-			pr_err("adc-tm enable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
@@ -538,132 +335,11 @@
 static int32_t qpnp_adc_tm_enable_if_channel_meas(
 					struct qpnp_adc_tm_chip *chip)
 {
-	u8 adc_tm_meas_en = 0, status_low = 0, status_high = 0;
 	int rc = 0;
 
-	if (chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_rc_check_channel_en(chip);
-		if (rc) {
-			pr_err("adc_tm channel check failed\n");
-			return rc;
-		}
-	} else {
-		/* Check if a measurement request is still required */
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							&adc_tm_meas_en, 1);
-		if (rc) {
-			pr_err("read status high failed with %d\n", rc);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							&status_low, 1);
-		if (rc) {
-			pr_err("read status low failed with %d\n", rc);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							&status_high, 1);
-		if (rc) {
-			pr_err("read status high failed with %d\n", rc);
-			return rc;
-		}
-
-		/* Enable only if there are pending measurement requests */
-		if ((adc_tm_meas_en && status_high) ||
-				(adc_tm_meas_en && status_low)) {
-			qpnp_adc_tm_enable(chip);
-
-			/* Request conversion */
-			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
-							QPNP_CONV_REQ_SET, 1);
-			if (rc < 0) {
-				pr_err("adc-tm request conversion failed\n");
-				return rc;
-			}
-		} else {
-			/* disable the vote if applicable */
-			if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
-					chip->adc->hkadc_ldo_ok) {
-				qpnp_adc_disable_voltage(chip->adc);
-				chip->adc_vote_enable = false;
-			}
-		}
-	}
-
-	return rc;
-}
-
-static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
-								u8 mode_ctl)
-{
-	int rc;
-
-	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
-
-	/* VADC_BTM current sets mode to recurring measurements */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl, 1);
-	if (rc < 0)
-		pr_err("adc-tm write mode selection err\n");
-
-	return rc;
-}
-
-static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
-{
-	u8 status1 = 0, mode_ctl = 0;
-	int rc, count = 0;
-
-	/* Re-enable the peripheral */
-	rc = qpnp_adc_tm_enable(chip);
+	rc = qpnp_adc_tm_rc_check_channel_en(chip);
 	if (rc) {
-		pr_err("adc-tm re-enable peripheral failed\n");
-		return rc;
-	}
-
-	/* The VADC_TM bank needs to be disabled for new conversion request */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return rc;
-	}
-
-	/* Disable the bank if a conversion is occurring */
-	while (status1 & QPNP_STATUS1_REQ_STS) {
-		if (count > QPNP_RETRY) {
-			pr_err("retry error=%d with 0x%x\n", count, status1);
-			break;
-		}
-		/*
-		 * Wait time is based on the optimum sampling rate
-		 * and adding enough time buffer to account for ADC conversions
-		 * occurring on different peripheral banks
-		 */
-		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1,
-							&status1, 1);
-		if (rc < 0) {
-			pr_err("adc-tm disable failed\n");
-			return rc;
-		}
-		count++;
-	}
-
-	if (!chip->adc_tm_hc) {
-		/* Change the mode back to recurring measurement mode */
-		mode_ctl = ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm mode change to recurring failed\n");
-			return rc;
-		}
-	}
-
-	/* Disable the peripheral */
-	rc = qpnp_adc_tm_disable(chip);
-	if (rc < 0) {
-		pr_err("adc-tm peripheral disable failed\n");
+		pr_err("adc_tm channel check failed\n");
 		return rc;
 	}
 
@@ -676,20 +352,11 @@
 	int rc = 0, i;
 	bool chan_found = false;
 
-	if (!chip->adc_tm_hc) {
-		for (i = 0; i < QPNP_ADC_TM_CHAN_NONE; i++) {
-			if (adc_tm_data[i].btm_amux_chan == btm_chan) {
-				*btm_chan_idx = i;
-				chan_found = true;
-			}
-		}
-	} else {
-		for (i = 0; i < chip->max_channels_available; i++) {
-			if (chip->sensor[i].btm_channel_num == btm_chan) {
-				*btm_chan_idx = i;
-				chan_found = true;
-				break;
-			}
+	for (i = 0; i < chip->max_channels_available; i++) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			*btm_chan_idx = i;
+			chan_found = true;
+			break;
 		}
 	}
 
@@ -760,12 +427,7 @@
 
 	switch (chip->sensor[chan_idx].timer_select) {
 	case ADC_MEAS_TIMER_SELECT1:
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
-				chip->sensor[chan_idx].meas_interval, 1);
-		else
-			rc = qpnp_adc_tm_write_reg(chip,
+		rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL,
 				chip->sensor[chan_idx].meas_interval, 1);
 		if (rc < 0) {
@@ -775,12 +437,7 @@
 	break;
 	case ADC_MEAS_TIMER_SELECT2:
 		/* Thermal channels uses timer2, default to 1 second */
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_read_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				&meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_read_reg(chip,
+		rc = qpnp_adc_tm_read_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -791,12 +448,7 @@
 		timer_interval_store <<= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT;
 		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK;
 		meas_interval_timer2 |= timer_interval_store;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_write_reg(chip,
+		rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -805,12 +457,7 @@
 		}
 	break;
 	case ADC_MEAS_TIMER_SELECT3:
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_read_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				&meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_read_reg(chip,
+		rc = qpnp_adc_tm_read_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -820,11 +467,6 @@
 		timer_interval_store = chip->sensor[chan_idx].meas_interval;
 		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK;
 		meas_interval_timer2 |= timer_interval_store;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				meas_interval_timer2, 1);
-		else
 			rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				meas_interval_timer2, 1);
@@ -844,14 +486,9 @@
 		pr_err("Invalid btm channel idx\n");
 		return rc;
 	}
-	if (!chip->adc_tm_hc)
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].meas_interval_ctl,
-				chip->sensor[chan_idx].timer_select, 1);
-	else
-		rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
-				chip->sensor[chan_idx].timer_select, 1);
+	rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
+			chip->sensor[chan_idx].timer_select, 1);
 	if (rc < 0) {
 		pr_err("TM channel timer configure failed\n");
 		return rc;
@@ -934,67 +571,6 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_read_thr_value(struct qpnp_adc_tm_chip *chip,
-			uint32_t btm_chan)
-{
-	int rc = 0;
-	u8 data_lsb = 0, data_msb = 0;
-	uint32_t btm_chan_idx = 0;
-	int32_t low_thr = 0, high_thr = 0;
-
-	if (!chip->adc_tm_hc) {
-		pr_err("Not applicable for VADC HC peripheral\n");
-		return -EINVAL;
-	}
-
-	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
-	if (rc < 0) {
-		pr_err("Invalid btm channel idx\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
-			&data_lsb, 1);
-	if (rc < 0) {
-		pr_err("low threshold lsb setting failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].low_thr_msb_addr,
-		&data_msb, 1);
-	if (rc < 0) {
-		pr_err("low threshold msb setting failed\n");
-		return rc;
-	}
-
-	low_thr = (data_msb << 8) | data_lsb;
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
-		&data_lsb, 1);
-	if (rc < 0) {
-		pr_err("high threshold lsb setting failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].high_thr_msb_addr,
-		&data_msb, 1);
-	if (rc < 0) {
-		pr_err("high threshold msb setting failed\n");
-		return rc;
-	}
-
-	high_thr = (data_msb << 8) | data_lsb;
-
-	pr_debug("configured thresholds high:0x%x and low:0x%x\n",
-		high_thr, low_thr);
-
-	return rc;
-}
-
 static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
 			uint32_t btm_chan, int32_t high_thr, int32_t low_thr)
 {
@@ -1007,69 +583,36 @@
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
-			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_msb_addr,
-			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold msb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
-			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
-		if (rc < 0) {
-			pr_err("high threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].high_thr_msb_addr,
-			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
-		if (rc < 0)
-			pr_err("high threshold msb setting failed\n");
-	} else {
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
-			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
-			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold msb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
-			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
-		if (rc < 0) {
-			pr_err("high threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
-			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
-		if (rc < 0)
-			pr_err("high threshold msb setting failed\n");
-
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
+		QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed\n");
+		return rc;
 	}
 
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
+		QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
+		QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
+		QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+	if (rc < 0)
+		pr_err("high threshold msb setting failed\n");
+
 	pr_debug("client requested high:%d and low:%d\n",
 		high_thr, low_thr);
 
@@ -1206,14 +749,9 @@
 			pr_debug("low sensor mask:%x with state:%d\n",
 					sensor_mask, chan_prop->state_request);
 			/* Enable low threshold's interrupt */
-			if (!chip->adc_tm_hc)
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_ADC_TM_LOW_THR_INT_EN,
-					sensor_mask, true);
-			else
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_BTM_Mn_EN(btm_chan_idx),
-					QPNP_BTM_Mn_LOW_THR_INT_EN, true);
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, true);
 			if (rc < 0) {
 				pr_err("low thr enable err:%d\n", btm_chan);
 				return rc;
@@ -1223,14 +761,9 @@
 		if (high_thr_set) {
 			/* Enable high threshold's interrupt */
 			pr_debug("high sensor mask:%x\n", sensor_mask);
-			if (!chip->adc_tm_hc)
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_ADC_TM_HIGH_THR_INT_EN,
-					sensor_mask, true);
-			else
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_BTM_Mn_EN(btm_chan_idx),
-					QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
 			if (rc < 0) {
 				pr_err("high thr enable err:%d\n", btm_chan);
 				return rc;
@@ -1239,11 +772,7 @@
 	}
 
 	/* Enable corresponding BTM channel measurement */
-	if (!chip->adc_tm_hc)
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
-	else
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
 			QPNP_BTM_Mn_MEAS_EN, true);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
@@ -1358,135 +887,12 @@
 	return 0;
 }
 
-static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
-			struct qpnp_adc_amux_properties *chan_prop)
-{
-	u8 decimation = 0, op_cntrl = 0, mode_ctl = 0;
-	int rc = 0;
-	uint32_t btm_chan = 0;
-
-	/* Set measurement in single measurement mode */
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-	if (rc < 0) {
-		pr_err("adc-tm single mode select failed\n");
-		return rc;
-	}
-
-	/* Disable bank */
-	rc = qpnp_adc_tm_disable(chip);
-	if (rc)
-		return rc;
-
-	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check(chip);
-	if (rc < 0) {
-		pr_err("adc-tm req_sts check failed\n");
-		return rc;
-	}
-
-	/* Configure AMUX channel select for the corresponding BTM channel*/
-	btm_chan = chan_prop->chan_prop->tm_channel_select;
-	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel, 1);
-	if (rc < 0) {
-		pr_err("adc-tm channel selection err\n");
-		return rc;
-	}
-
-	/* Digital parameter setup */
-	decimation |= chan_prop->decimation <<
-				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation, 1);
-	if (rc < 0) {
-		pr_err("adc-tm digital parameter setup err\n");
-		return rc;
-	}
-
-	/* Hardware setting time */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
-					chan_prop->hw_settle_time, 1);
-	if (rc < 0) {
-		pr_err("adc-tm hw settling time setup err\n");
-		return rc;
-	}
-
-	/* Fast averaging setup/enable */
-	rc = qpnp_adc_tm_fast_avg_en(chip, &chan_prop->fast_avg_setup);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg enable err\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
-				chan_prop->fast_avg_setup, 1);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg setup err\n");
-		return rc;
-	}
-
-	/* Measurement interval setup */
-	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
-						chan_prop->chan_prop);
-	if (rc < 0) {
-		pr_err("adc-tm timer select failed\n");
-		return rc;
-	}
-
-	/* Channel configuration setup */
-	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
-			chan_prop->chan_prop, chan_prop->amux_channel);
-	if (rc < 0) {
-		pr_err("adc-tm channel configure failed\n");
-		return rc;
-	}
-
-	/* Recurring interval measurement enable */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
-							&op_cntrl, 1);
-	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
-	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
-			op_cntrl, true);
-	if (rc < 0) {
-		pr_err("adc-tm meas interval op configure failed\n");
-		return rc;
-	}
-
-	/* Enable bank */
-	rc = qpnp_adc_tm_enable(chip);
-	if (rc)
-		return rc;
-
-	/* Request conversion */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
-	if (rc < 0) {
-		pr_err("adc-tm request conversion failed\n");
-		return rc;
-	}
-
-	return 0;
-}
-
-static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode *mode)
-{
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-
-	if ((IS_ERR(adc_tm)) || qpnp_adc_tm_check_revision(
-			adc_tm->chip, adc_tm->btm_channel_num))
-		return -EINVAL;
-
-	*mode = adc_tm->mode;
-
-	return 0;
-}
-
-static int qpnp_adc_tm_set_mode(struct thermal_zone_device *thermal,
+static int qpnp_adc_tm_set_mode(struct qpnp_adc_tm_sensor *adc_tm,
 			      enum thermal_device_mode mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, channel;
-	u8 sensor_mask = 0, mode_ctl = 0;
+	u8 sensor_mask = 0;
 	uint32_t btm_chan_idx = 0, btm_chan = 0;
 
 	if (qpnp_adc_tm_is_valid(chip)) {
@@ -1525,32 +931,14 @@
 		chip->adc->amux_prop->calib_type =
 			chip->adc->adc_channels[channel].calib_type;
 
-		if (!chip->adc_tm_hc) {
-			rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-			if (rc) {
-				pr_err("adc-tm configure failed with %d\n", rc);
-				goto fail;
-			}
-		} else {
-			rc = qpnp_adc_tm_hc_configure(chip,
-							chip->adc->amux_prop);
-			if (rc) {
-				pr_err("hc configure failed with %d\n", rc);
-				goto fail;
-			}
+		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("hc configure failed with %d\n", rc);
+			goto fail;
 		}
 	} else if (mode == THERMAL_DEVICE_DISABLED) {
 		sensor_mask = 1 << adc_tm->sensor_num;
 
-		if (!chip->adc_tm_hc) {
-			mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-			rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-			if (rc < 0) {
-				pr_err("adc-tm single mode select failed\n");
-				goto fail;
-			}
-		}
-
 		/* Disable bank */
 		rc = qpnp_adc_tm_disable(chip);
 		if (rc < 0) {
@@ -1558,28 +946,12 @@
 			goto fail;
 		}
 
-		if (!chip->adc_tm_hc) {
-			/* Check if a conversion is in progress */
-			rc = qpnp_adc_tm_req_sts_check(chip);
-			if (rc < 0) {
-				pr_err("adc-tm req_sts check failed\n");
-				goto fail;
-			}
-
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
-			if (rc < 0) {
-				pr_err("multi measurement update failed\n");
-				goto fail;
-			}
-		} else {
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_MEAS_EN, false);
-			if (rc < 0) {
-				pr_err("multi measurement disable failed\n");
-				goto fail;
-			}
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			goto fail;
 		}
 
 		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -1597,11 +969,13 @@
 	return 0;
 }
 
-static int qpnp_adc_tm_get_trip_type(struct thermal_zone_device *thermal,
-				   int trip, enum thermal_trip_type *type)
+static int qpnp_adc_tm_activate_trip_type(struct qpnp_adc_tm_sensor *adc_tm,
+			int trip, enum thermal_trip_activation_mode mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, sensor_mask = 0;
+	bool state = false;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
 
 	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
@@ -1609,110 +983,48 @@
 	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		*type = THERMAL_TRIP_CONFIGURABLE_HI;
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
-		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
-	break;
-	default:
-		return -EINVAL;
-	}
+	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
+		state = true;
 
-	return 0;
-}
+	sensor_mask = 1 << adc_tm->sensor_num;
 
-static int qpnp_adc_tm_get_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int *temp)
-{
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
-	int64_t result = 0;
-	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
-	unsigned int reg, rc = 0;
-	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
-	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
-	uint32_t btm_chan_idx = 0, btm_chan = 0;
+	pr_debug("Sensor number:%x with state:%d\n",
+					adc_tm->sensor_num, state);
 
-	if (qpnp_adc_tm_is_valid(chip))
-		return -ENODEV;
-
-	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
-		return -EINVAL;
-
-	btm_chan = adc_tm_sensor->btm_channel_num;
+	btm_chan = adc_tm->btm_channel_num;
 	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
 	if (rc < 0) {
 		pr_err("Invalid btm channel idx\n");
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
-		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
-		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
-		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
-	} else {
-		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
-		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
-		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
-		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
-	}
-
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
-						&trip_warm_thr0, 1);
-		if (rc) {
-			pr_err("adc-tm low_thr_lsb err\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
-						&trip_warm_thr1, 1);
-		if (rc) {
-			pr_err("adc-tm low_thr_msb err\n");
-			return rc;
-		}
-	reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
+		/* low_thr (lower voltage) for higher temp */
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_LOW_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
-						&trip_cool_thr0, 1);
-		if (rc) {
-			pr_err("adc-tm_tm high_thr_lsb err\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
-						&trip_cool_thr1, 1);
-		if (rc) {
-			pr_err("adc-tm_tm high_thr_lsb err\n");
-			return rc;
-		}
-	reg = (trip_cool_thr1 << 8) | trip_cool_thr0;
+		/* high_thr (higher voltage) for cooler temp */
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
 	break;
 	default:
 		return -EINVAL;
 	}
 
-	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev,
-					chip->adc->adc_prop, reg, &result);
-	if (rc < 0) {
-		pr_err("Failed to lookup the therm thresholds\n");
-		return rc;
-	}
-
-	*temp = result;
-
-	return 0;
+	return rc;
 }
 
-static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int temp)
+static int qpnp_adc_tm_set_trip_temp(void *data, int low_temp, int high_temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm = data;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	struct qpnp_adc_tm_config tm_config;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
@@ -1729,19 +1041,18 @@
 
 	tm_config.channel = adc_tm->vadc_channel_num;
 	tm_config.high_thr_temp = tm_config.low_thr_temp = 0;
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		tm_config.high_thr_temp = temp;
-		break;
-	case ADC_TM_TRIP_LOW_COOL:
-		tm_config.low_thr_temp = temp;
-		break;
-	default:
+	if (high_temp != INT_MAX)
+		tm_config.high_thr_temp = high_temp;
+	if (low_temp != INT_MIN)
+		tm_config.low_thr_temp = low_temp;
+
+	if ((high_temp == INT_MAX) && (low_temp == INT_MIN)) {
+		pr_err("No trips to set\n");
 		return -EINVAL;
 	}
 
-	pr_debug("requested a high - %d and low - %d with trip - %d\n",
-			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
+	pr_debug("requested a high - %d and low - %d\n",
+			tm_config.high_thr_temp, tm_config.low_thr_temp);
 	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev,
 				chip->adc->adc_prop, &tm_config);
 	if (rc < 0) {
@@ -1764,20 +1075,12 @@
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
-		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
-		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
-		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
-	} else {
-		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
-		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
-		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
-		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
-	}
+	reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+	reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+	reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+	reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
 
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
+	if (high_temp != INT_MAX) {
 		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
 						trip_cool_thr0, 1);
 		if (rc) {
@@ -1791,9 +1094,26 @@
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm->low_thr = tm_config.high_thr_voltage;
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
+		adc_tm->low_thr = tm_config.high_thr_voltage;
+
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_HIGH_WARM,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+		if (rc) {
+			pr_err("adc-tm warm activation failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_HIGH_WARM,
+				THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc) {
+			pr_err("adc-tm warm deactivation failed\n");
+			return rc;
+		}
+	}
+
+	if (low_temp != INT_MIN) {
 		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
 						trip_warm_thr0, 1);
 		if (rc) {
@@ -1807,10 +1127,37 @@
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm->high_thr = tm_config.low_thr_voltage;
-	break;
-	default:
-		return -EINVAL;
+		adc_tm->high_thr = tm_config.low_thr_voltage;
+
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_LOW_COOL,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+		if (rc) {
+			pr_err("adc-tm cool activation failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_LOW_COOL,
+				THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc) {
+			pr_err("adc-tm cool deactivation failed\n");
+			return rc;
+		}
+	}
+
+	if ((high_temp != INT_MAX) || (low_temp != INT_MIN)) {
+		rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_ENABLED);
+		if (rc) {
+			pr_err("sensor enabled failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_DISABLED);
+		if (rc) {
+			pr_err("sensor disable failed\n");
+			return rc;
+		}
 	}
 
 	return 0;
@@ -1878,9 +1225,8 @@
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 
 	if (adc_tm->thermal_node) {
-		sysfs_notify(&adc_tm->tz_dev->device.kobj,
-					NULL, "type");
 		pr_debug("notifying uspace client\n");
+		of_thermal_handle_trip(adc_tm->tz_dev);
 	} else {
 		if (adc_tm->scale_type == SCALE_RBATT_THERM)
 			notify_battery_therm(adc_tm);
@@ -1891,222 +1237,23 @@
 	atomic_dec(&chip->wq_cnt);
 }
 
-static int qpnp_adc_tm_activate_trip_type(struct thermal_zone_device *thermal,
-			int trip, enum thermal_trip_activation_mode mode)
-{
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
-	int rc = 0, sensor_mask = 0;
-	u8 thr_int_en = 0;
-	bool state = false;
-	uint32_t btm_chan_idx = 0, btm_chan = 0;
-
-	if (qpnp_adc_tm_is_valid(chip))
-		return -ENODEV;
-
-	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
-		return -EINVAL;
-
-	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
-		state = true;
-
-	sensor_mask = 1 << adc_tm->sensor_num;
-
-	pr_debug("Sensor number:%x with state:%d\n",
-					adc_tm->sensor_num, state);
-
-	btm_chan = adc_tm->btm_channel_num;
-	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
-	if (rc < 0) {
-		pr_err("Invalid btm channel idx\n");
-		return rc;
-	}
-
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		/* low_thr (lower voltage) for higher temp */
-		thr_int_en = adc_tm_data[btm_chan_idx].low_thr_int_chan_en;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_LOW_THR_INT_EN,
-				sensor_mask, state);
-		else
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_LOW_THR_INT_EN, state);
-		if (rc)
-			pr_err("channel:%x failed\n", btm_chan);
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
-		/* high_thr (higher voltage) for cooler temp */
-		thr_int_en = adc_tm_data[btm_chan_idx].high_thr_int_chan_en;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_HIGH_THR_INT_EN,
-				sensor_mask, state);
-		else
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
-		if (rc)
-			pr_err("channel:%x failed\n", btm_chan);
-	break;
-	default:
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip,
-			int sensor_num, u8 status_high, u8 *notify_check)
-{
-	int rc = 0;
-	u8 sensor_mask = 0, mode_ctl = 0;
-	int32_t old_thr = 0, new_thr = 0;
-	uint32_t channel, btm_chan_num, scale_type;
-	struct qpnp_vadc_result result;
-	struct qpnp_adc_thr_client_info *client_info = NULL;
-	struct list_head *thr_list;
-	bool status = false;
-
-	if (!chip->adc_tm_recalib_check) {
-		*notify_check = 1;
-		return rc;
-	}
-
-	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
-		client_info = list_entry(thr_list,
-				struct qpnp_adc_thr_client_info, list);
-		channel = client_info->btm_param->channel;
-		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
-		sensor_mask = 1 << sensor_num;
-
-		rc = qpnp_vadc_read(chip->vadc_dev, channel, &result);
-		if (rc < 0) {
-			pr_err("failure to read vadc channel=%d\n",
-					client_info->btm_param->channel);
-			goto fail;
-		}
-		new_thr = result.physical;
-
-		if (status_high)
-			old_thr = client_info->btm_param->high_thr;
-		else
-			old_thr = client_info->btm_param->low_thr;
-
-		if (new_thr > old_thr)
-			status = (status_high) ? true : false;
-		else
-			status = (status_high) ? false : true;
-
-		pr_debug(
-			"recalib:sen=%d, new_thr=%d, new_thr_adc_code=0x%x, old_thr=%d status=%d valid_status=%d\n",
-			sensor_num, new_thr, result.adc_code,
-			old_thr, status_high, status);
-
-		rc = qpnp_adc_tm_read_thr_value(chip, btm_chan_num);
-		if (rc < 0) {
-			pr_err("adc-tm thresholds read failed\n");
-			goto fail;
-		}
-
-		if (status) {
-			*notify_check = 1;
-			pr_debug("Client can be notify\n");
-			return rc;
-		}
-
-		pr_debug("Client can not be notify, restart measurement\n");
-		/* Set measurement in single measurement mode */
-		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm single mode select failed\n");
-			goto fail;
-		}
-
-		/* Disable bank */
-		rc = qpnp_adc_tm_disable(chip);
-		if (rc < 0) {
-			pr_err("adc-tm disable failed\n");
-			goto fail;
-		}
-
-		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc < 0) {
-			pr_err("adc-tm req_sts check failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int write failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int enable failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi measurement en failed\n");
-			goto fail;
-		}
-
-		/* restart measurement */
-		scale_type = chip->sensor[sensor_num].scale_type;
-		chip->adc->amux_prop->amux_channel = channel;
-		chip->adc->amux_prop->decimation =
-			chip->adc->adc_channels[sensor_num].adc_decimation;
-		chip->adc->amux_prop->hw_settle_time =
-			chip->adc->adc_channels[sensor_num].hw_settle_time;
-		chip->adc->amux_prop->fast_avg_setup =
-			chip->adc->adc_channels[sensor_num].fast_avg_setup;
-		chip->adc->amux_prop->mode_sel =
-			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev,
-				client_info->btm_param,
-				&chip->adc->amux_prop->chan_prop->low_thr,
-				&chip->adc->amux_prop->chan_prop->high_thr);
-		qpnp_adc_tm_add_to_list(chip, sensor_num,
-				client_info->btm_param,
-				chip->adc->amux_prop->chan_prop);
-		chip->adc->amux_prop->chan_prop->tm_channel_select =
-				chip->sensor[sensor_num].btm_channel_num;
-		chip->adc->amux_prop->chan_prop->state_request =
-				client_info->btm_param->state_request;
-
-		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm configure failed with %d\n", rc);
-			goto fail;
-		}
-		*notify_check = 0;
-		pr_debug("BTM channel reconfigured for measuremnt\n");
-	}
-fail:
-	return rc;
-}
-
 static int qpnp_adc_tm_disable_rearm_high_thresholds(
 			struct qpnp_adc_tm_chip *chip, int sensor_num)
 {
 
 	struct qpnp_adc_thr_client_info *client_info = NULL;
 	struct list_head *thr_list;
-	uint32_t btm_chan_num = 0;
-	u8 sensor_mask = 0, notify_check = 0;
+	uint32_t btm_chan_num = 0, btm_chan_idx = 0;
+	u8 sensor_mask = 0;
 	int rc = 0;
 
 	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
 	pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 		sensor_num, chip->th_info.adc_tm_high_enable,
 		chip->th_info.adc_tm_low_enable,
@@ -2118,11 +1265,11 @@
 		 */
 		sensor_mask = 1 << sensor_num;
 		pr_debug("non thermal node - mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_recalib_request_check(chip,
-				sensor_num, true, &notify_check);
-		if (rc < 0 || !notify_check) {
-			pr_debug("Calib recheck re-armed rc=%d\n", rc);
-			chip->th_info.adc_tm_high_enable = 0;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("high threshold int update failed\n");
 			return rc;
 		}
 	} else {
@@ -2134,7 +1281,7 @@
 		sensor_mask = 1 << sensor_num;
 		pr_debug("thermal node with mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
+			&chip->sensor[sensor_num],
 			ADC_TM_TRIP_LOW_COOL,
 			THERMAL_TRIP_ACTIVATION_DISABLED);
 		if (rc < 0) {
@@ -2159,22 +1306,12 @@
 	}
 	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_BTM_Mn_EN(sensor_num),
-			QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip,
+		QPNP_BTM_Mn_EN(sensor_num),
+		QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi meas disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2194,11 +1331,17 @@
 {
 	struct qpnp_adc_thr_client_info *client_info = NULL;
 	struct list_head *thr_list;
-	uint32_t btm_chan_num = 0;
-	u8 sensor_mask = 0, notify_check = 0;
+	uint32_t btm_chan_num = 0, btm_chan_idx = 0;
+	u8 sensor_mask = 0;
 	int rc = 0;
 
 	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
 	pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 		sensor_num, chip->th_info.adc_tm_high_enable,
 		chip->th_info.adc_tm_low_enable,
@@ -2208,20 +1351,13 @@
 		 * For non thermal registered clients such as usb_id,
 		 * vbatt, pmic_therm
 		 */
-		pr_debug("non thermal node - mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_recalib_request_check(chip,
-				sensor_num, false, &notify_check);
-		if (rc < 0 || !notify_check) {
-			pr_debug("Calib recheck re-armed rc=%d\n", rc);
-			chip->th_info.adc_tm_low_enable = 0;
-			return rc;
-		}
 		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_LOW_THR_INT_EN, false);
 		if (rc < 0) {
-			pr_err("low threshold int read failed\n");
+			pr_err("low threshold int update failed\n");
 			return rc;
 		}
 	} else {
@@ -2233,7 +1369,7 @@
 		sensor_mask = 1 << sensor_num;
 		pr_debug("thermal node with mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
+			&chip->sensor[sensor_num],
 			ADC_TM_TRIP_HIGH_WARM,
 			THERMAL_TRIP_ACTIVATION_DISABLED);
 		if (rc < 0) {
@@ -2258,22 +1394,12 @@
 	}
 	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_BTM_Mn_EN(sensor_num),
-			QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip,
+		QPNP_BTM_Mn_EN(sensor_num),
+		QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi meas disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2299,14 +1425,6 @@
 
 	mutex_lock(&chip->adc->adc_lock);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc) {
-			pr_err("adc-tm-tm req sts check failed with %d\n", rc);
-			goto fail;
-		}
-	}
-
 	while (sensor_num < chip->max_channels_available) {
 		if (chip->sensor[sensor_num].high_thr_triggered) {
 			rc = qpnp_adc_tm_disable_rearm_high_thresholds(
@@ -2364,93 +1482,6 @@
 		pr_err("adc-tm high thr work failed\n");
 }
 
-static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
-{
-	struct qpnp_adc_tm_chip *chip = data;
-	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
-	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
-
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	/* Set measurement in single measurement mode */
-	qpnp_adc_tm_mode_select(chip, mode_ctl);
-
-	qpnp_adc_tm_disable(chip);
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
-					&chip->th_info.status_high, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-				&chip->th_info.adc_tm_high_thr_set, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read high thr failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	/*
-	 * Check which interrupt threshold is lower and measure against the
-	 * enabled channel.
-	 */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-				&chip->th_info.qpnp_adc_tm_meas_en, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	chip->th_info.adc_tm_high_enable = chip->th_info.qpnp_adc_tm_meas_en &
-						chip->th_info.status_high;
-	chip->th_info.adc_tm_high_enable &= chip->th_info.adc_tm_high_thr_set;
-
-	sensor_notify_num = chip->th_info.adc_tm_high_enable;
-	while (i < chip->max_channels_available) {
-		if ((sensor_notify_num & 0x1) == 1)
-			sensor_num = i;
-		sensor_notify_num >>= 1;
-		i++;
-	}
-
-	if (!chip->sensor[sensor_num].thermal_node) {
-		sensor_mask = 1 << sensor_num;
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_HIGH_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int read failed\n");
-			return IRQ_HANDLED;
-		}
-	} else {
-		/*
-		 * Uses the thermal sysfs registered device to disable
-		 * the corresponding high voltage threshold which
-		 * is triggered by low temp
-		 */
-		pr_debug("thermal node with mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
-			ADC_TM_TRIP_LOW_COOL,
-			THERMAL_TRIP_ACTIVATION_DISABLED);
-		if (rc < 0) {
-			pr_err("notify error:%d\n", sensor_num);
-			return IRQ_HANDLED;
-		}
-	}
-
-	atomic_inc(&chip->wq_cnt);
-	queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
-
-	return IRQ_HANDLED;
-}
-
 static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
 {
 	struct qpnp_adc_tm_chip *chip = container_of(work,
@@ -2471,88 +1502,6 @@
 		pr_err("adc-tm low thr work failed\n");
 }
 
-static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
-{
-	struct qpnp_adc_tm_chip *chip = data;
-	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
-	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
-
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	/* Set measurement in single measurement mode */
-	qpnp_adc_tm_mode_select(chip, mode_ctl);
-
-	qpnp_adc_tm_disable(chip);
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
-					&chip->th_info.status_low, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status low failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-				&chip->th_info.adc_tm_low_thr_set, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read low thr failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-				&chip->th_info.qpnp_adc_tm_meas_en, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	chip->th_info.adc_tm_low_enable = chip->th_info.qpnp_adc_tm_meas_en &
-					chip->th_info.status_low;
-	chip->th_info.adc_tm_low_enable &= chip->th_info.adc_tm_low_thr_set;
-
-	sensor_notify_num = chip->th_info.adc_tm_low_enable;
-	while (i < chip->max_channels_available) {
-		if ((sensor_notify_num & 0x1) == 1)
-			sensor_num = i;
-		sensor_notify_num >>= 1;
-		i++;
-	}
-
-	if (!chip->sensor[sensor_num].thermal_node) {
-		sensor_mask = 1 << sensor_num;
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int read failed\n");
-			return IRQ_HANDLED;
-		}
-	} else {
-		/* Uses the thermal sysfs registered device to disable
-		 * the corresponding low voltage threshold which
-		 * is triggered by high temp
-		 */
-		pr_debug("thermal node with mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
-			ADC_TM_TRIP_HIGH_WARM,
-			THERMAL_TRIP_ACTIVATION_DISABLED);
-		if (rc < 0) {
-			pr_err("notify error:%d\n", sensor_num);
-			return IRQ_HANDLED;
-		}
-	}
-
-	atomic_inc(&chip->wq_cnt);
-	queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
-
-	return IRQ_HANDLED;
-}
-
 static int qpnp_adc_tm_rc_check_sensor_trip(struct qpnp_adc_tm_chip *chip,
 			u8 status_low, u8 status_high, int i,
 			int *sensor_low_notify_num, int *sensor_high_notify_num)
@@ -2587,7 +1536,7 @@
 			 */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 				rc = qpnp_adc_tm_activate_trip_type(
-					chip->sensor[i].tz_dev,
+					&chip->sensor[i],
 					ADC_TM_TRIP_HIGH_WARM,
 					THERMAL_TRIP_ACTIVATION_DISABLED);
 				if (rc < 0) {
@@ -2618,7 +1567,7 @@
 			 */
 				pr_debug("thermal node with mask:%x\n", i);
 				rc = qpnp_adc_tm_activate_trip_type(
-					chip->sensor[i].tz_dev,
+					&chip->sensor[i],
 					ADC_TM_TRIP_LOW_COOL,
 					THERMAL_TRIP_ACTIVATION_DISABLED);
 				if (rc < 0) {
@@ -2688,10 +1637,9 @@
 	return IRQ_HANDLED;
 }
 
-static int qpnp_adc_read_temp(struct thermal_zone_device *thermal,
-			     int *temp)
+static int qpnp_adc_read_temp(void *data, int *temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm_sensor = data;
 	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	struct qpnp_vadc_result result;
 	int rc = 0;
@@ -2706,14 +1654,9 @@
 	return rc;
 }
 
-static struct thermal_zone_device_ops qpnp_adc_tm_thermal_ops = {
+static struct thermal_zone_of_device_ops qpnp_adc_tm_thermal_ops = {
 	.get_temp = qpnp_adc_read_temp,
-	.get_mode = qpnp_adc_tm_get_mode,
-	.set_mode = qpnp_adc_tm_set_mode,
-	.get_trip_type = qpnp_adc_tm_get_trip_type,
-	.activate_trip_type = qpnp_adc_tm_activate_trip_type,
-	.get_trip_temp = qpnp_adc_tm_get_trip_temp,
-	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
+	.set_trips = qpnp_adc_tm_set_trip_temp,
 };
 
 int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
@@ -2807,18 +1750,11 @@
 					param->state_request;
 	chip->adc->amux_prop->calib_type =
 			chip->adc->adc_channels[dt_index].calib_type;
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm configure failed with %d\n", rc);
-			goto fail_unlock;
-		}
-	} else {
-		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm hc configure failed with %d\n", rc);
-			goto fail_unlock;
-		}
+
+	rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+	if (rc) {
+		pr_err("adc-tm hc configure failed with %d\n", rc);
+		goto fail_unlock;
 	}
 
 	chip->sensor[dt_index].scale_type = scale_type;
@@ -2834,7 +1770,6 @@
 					struct qpnp_adc_tm_btm_param *param)
 {
 	uint32_t channel, dt_index = 0, btm_chan_num;
-	u8 sensor_mask = 0, mode_ctl = 0;
 	int rc = 0;
 
 	if (qpnp_adc_tm_is_valid(chip))
@@ -2842,16 +1777,6 @@
 
 	mutex_lock(&chip->adc->adc_lock);
 
-	if (!chip->adc_tm_hc) {
-		/* Set measurement in single measurement mode */
-		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm single mode select failed\n");
-			goto fail;
-		}
-	}
-
 	/* Disable bank */
 	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0) {
@@ -2859,15 +1784,6 @@
 		goto fail;
 	}
 
-	if (!chip->adc_tm_hc) {
-		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc < 0) {
-			pr_err("adc-tm req_sts check failed\n");
-			goto fail;
-		}
-	}
-
 	channel = param->channel;
 	while ((chip->adc->adc_channels[dt_index].channel_num
 		!= channel) && (dt_index < chip->max_channels_available))
@@ -2881,50 +1797,25 @@
 
 	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
 
-	if (!chip->adc_tm_hc) {
-		sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+	if (rc < 0) {
+		pr_err("high thr disable err:%d\n", btm_chan_num);
+		return rc;
+	}
 
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int write failed\n");
-			goto fail;
-		}
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+	if (rc < 0) {
+		pr_err("low thr disable err:%d\n", btm_chan_num);
+		return rc;
+	}
 
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int enable failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi measurement en failed\n");
-			goto fail;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
-		if (rc < 0) {
-			pr_err("high thr disable err:%d\n", btm_chan_num);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
-		if (rc < 0) {
-			pr_err("low thr disable err:%d\n", btm_chan_num);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi measurement disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi measurement disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2938,22 +1829,6 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
 
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
-				struct qpnp_adc_tm_btm_param *param)
-{
-	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
-	return qpnp_adc_tm_channel_measure(chip, param);
-}
-EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
-
-int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
-{
-	struct qpnp_adc_tm_btm_param param;
-
-	return qpnp_adc_tm_disable_chan_meas(chip, &param);
-}
-EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
-
 struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
 {
 	struct qpnp_adc_tm_chip *chip;
@@ -2974,35 +1849,6 @@
 }
 EXPORT_SYMBOL(qpnp_get_adc_tm);
 
-static int qpnp_adc_tm_initial_setup(struct qpnp_adc_tm_chip *chip)
-{
-	u8 thr_init = 0;
-	int rc = 0;
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("high thr init failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("low thr init failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("multi meas en failed\n");
-		return rc;
-	}
-
-	return rc;
-}
-
 static const struct of_device_id qpnp_adc_tm_match_table[] = {
 	{	.compatible = "qcom,qpnp-adc-tm" },
 	{	.compatible = "qcom,qpnp-adc-tm-hc" },
@@ -3055,10 +1901,8 @@
 		goto fail;
 	}
 
-	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
-		chip->adc_tm_hc = true;
-		chip->adc->adc_hc = true;
-	}
+	chip->adc_tm_hc = true;
+	chip->adc->adc_hc = true;
 
 	rc = qpnp_adc_get_devicetree_data(pdev, chip->adc);
 	if (rc) {
@@ -3067,25 +1911,6 @@
 	}
 	mutex_init(&chip->adc->adc_lock);
 
-	/* Register the ADC peripheral interrupt */
-	if (!chip->adc_tm_hc) {
-		chip->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
-						"high-thr-en-set");
-		if (chip->adc->adc_high_thr_irq < 0) {
-			pr_err("Invalid irq\n");
-			rc = -ENXIO;
-			goto fail;
-		}
-
-		chip->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
-						"low-thr-en-set");
-		if (chip->adc->adc_low_thr_irq < 0) {
-			pr_err("Invalid irq\n");
-			rc = -ENXIO;
-			goto fail;
-		}
-	}
-
 	chip->vadc_dev = qpnp_get_vadc(&pdev->dev, "adc_tm");
 	if (IS_ERR(chip->vadc_dev)) {
 		rc = PTR_ERR(chip->vadc_dev);
@@ -3155,10 +1980,11 @@
 			chip->sensor[sen_idx].high_thr =
 						QPNP_ADC_TM_M0_HIGH_THR;
 			chip->sensor[sen_idx].tz_dev =
-				thermal_zone_device_register(name,
-				ADC_TM_TRIP_NUM, ADC_TM_WRITABLE_TRIPS_MASK,
+				devm_thermal_zone_of_sensor_register(
+				chip->dev,
+				chip->sensor[sen_idx].vadc_channel_num,
 				&chip->sensor[sen_idx],
-				&qpnp_adc_tm_thermal_ops, NULL, 0, 0);
+				&qpnp_adc_tm_thermal_ops);
 			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
 				pr_err("thermal device register failed.\n");
 		}
@@ -3173,18 +1999,21 @@
 		sen_idx++;
 	}
 	chip->max_channels_available = count_adc_channel_list;
+
 	chip->high_thr_wq = alloc_workqueue("qpnp_adc_tm_high_thr_wq",
 							WQ_HIGHPRI, 0);
 	if (!chip->high_thr_wq) {
 		pr_err("Requesting high thr priority wq failed\n");
 		goto fail;
 	}
+
 	chip->low_thr_wq = alloc_workqueue("qpnp_adc_tm_low_thr_wq",
 							WQ_HIGHPRI, 0);
 	if (!chip->low_thr_wq) {
 		pr_err("Requesting low thr priority wq failed\n");
 		goto fail;
 	}
+
 	chip->thr_wq = alloc_workqueue("qpnp_adc_tm_thr_wq",
 						WQ_HIGHPRI, 0);
 	if (!chip->thr_wq) {
@@ -3196,39 +2025,13 @@
 	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
 	atomic_set(&chip->wq_cnt, 0);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_initial_setup(chip);
-		if (rc)
-			goto fail;
-
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_high_thr_irq,
-				qpnp_adc_tm_high_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
-		if (rc) {
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-			goto fail;
-		} else {
-			enable_irq_wake(chip->adc->adc_high_thr_irq);
-		}
-
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_low_thr_irq,
-					qpnp_adc_tm_low_thr_isr,
-			IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
-		if (rc) {
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-			goto fail;
-		} else {
-			enable_irq_wake(chip->adc->adc_low_thr_irq);
-		}
-	} else {
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
-				qpnp_adc_tm_rc_thr_isr,
-			IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
-		if (rc)
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-		else
-			enable_irq_wake(chip->adc->adc_irq_eoc);
-	}
+	rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
+			qpnp_adc_tm_rc_thr_isr,
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+	if (rc)
+		dev_err(&pdev->dev, "failed to request adc irq\n");
+	else
+		enable_irq_wake(chip->adc->adc_irq_eoc);
 
 	chip->adc_vote_enable = false;
 	dev_set_drvdata(&pdev->dev, chip);
@@ -3258,17 +2061,11 @@
 {
 	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
 	struct device_node *node = pdev->dev.of_node, *child;
-	bool thermal_node = false;
 	int i = 0;
 
 	for_each_child_of_node(node, child) {
-		thermal_node = of_property_read_bool(child,
-					"qcom,thermal-node");
-		if (thermal_node) {
-			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
-			if (chip->sensor[i].req_wq)
-				destroy_workqueue(chip->sensor[i].req_wq);
-		}
+		if (chip->sensor[i].req_wq)
+			destroy_workqueue(chip->sensor[i].req_wq);
 		i++;
 	}
 
@@ -3286,40 +2083,20 @@
 static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
 {
 	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
-	int rc = 0;
-	u8 reg_val = 0, status1 = 0, en_ctl1 = 0;
-
-	/* Set measurement in single measurement mode */
-	reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	rc = qpnp_adc_tm_mode_select(chip, reg_val);
-	if (rc < 0)
-		pr_err("adc-tm single mode select failed\n");
+	int rc = 0, i = 0;
 
 	/* Disable bank */
 	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0)
 		pr_err("adc-tm disable failed\n");
 
-	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check(chip);
-	if (rc < 0)
-		pr_err("adc-tm req_sts check failed\n");
-
-	/* Disable multimeasurement */
-	reg_val = 0;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
-	if (rc < 0)
-		pr_err("adc-tm multi-measurement mode disable failed\n");
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc < 0)
-		pr_err("adc-tm status1 read failed\n");
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_EN_CTL1, &en_ctl1, 1);
-	if (rc < 0)
-		pr_err("adc-tm en_ctl1 read failed\n");
-
-	pr_debug("adc-tm status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+	for (i = 0; i < QPNP_BTM_CHANNELS; i++) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(i),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0)
+			pr_err("multi measurement disable failed\n");
+	}
 }
 
 static int qpnp_adc_tm_suspend_noirq(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6e3e636..22d32d2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5621,17 +5621,15 @@
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
-	const struct pciserial_board *board;
+	struct serial_private *new;
 
 	if (!priv)
 		return;
 
-	board = priv->board;
-	kfree(priv);
-	priv = pciserial_init_ports(dev, board);
-
-	if (!IS_ERR(priv)) {
-		pci_set_drvdata(dev, priv);
+	new = pciserial_init_ports(dev, priv->board);
+	if (!IS_ERR(new)) {
+		pci_set_drvdata(dev, new);
+		kfree(priv);
 	}
 }
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 3fec1d7..6a3f2ac 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -379,7 +379,6 @@
 
 	ret = pm_runtime_get_sync(uport->dev);
 	if (ret < 0) {
-		dev_err(uport->dev, "%s: Failed (%d)", __func__, ret);
 		pm_runtime_put_noidle(uport->dev);
 		pm_runtime_set_suspended(uport->dev);
 		return ret;
@@ -489,10 +488,8 @@
 	unsigned int s_irq_status;
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
-			M_SEC_IRQ_EN, true))) {
-		dev_err(uport->dev, "%s: Failed waiting for SE\n", __func__);
+			M_SEC_IRQ_EN, true)))
 		return -ENXIO;
-	}
 
 	m_irq_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_STATUS);
@@ -504,10 +501,8 @@
 						SE_GENI_S_IRQ_CLEAR);
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_RX_FIFO_STATUS,
-			RX_FIFO_WC_MSK, true))) {
-		dev_err(uport->dev, "%s: Failed waiting for Rx\n", __func__);
+			RX_FIFO_WC_MSK, true)))
 		return -ENXIO;
-	}
 
 	/*
 	 * Read the Rx FIFO only after clearing the interrupt registers and
@@ -610,10 +605,8 @@
 	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
 
 	port = get_port_from_line(co->index, true);
-	if (IS_ERR_OR_NULL(port)) {
-		pr_err("%s:Invalid line %d\n", __func__, co->index);
+	if (IS_ERR_OR_NULL(port))
 		return;
-	}
 
 	uport = &port->uport;
 	spin_lock(&uport->lock);
@@ -982,7 +975,7 @@
 	port->tx_fifo_width = get_tx_fifo_width(uport->membase);
 	if (!port->tx_fifo_width) {
 		dev_err(uport->dev, "%s:Invalid TX FIFO width read\n",
-								 __func__);
+								__func__);
 		return -ENXIO;
 	}
 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 56df0f6..62574bf 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1451,9 +1451,6 @@
 		unsigned transfer_in_flight;
 		unsigned started;
 
-		if (dep->flags & DWC3_EP_STALL)
-			return 0;
-
 		if (dep->number > 1)
 			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
 		else
@@ -1478,8 +1475,6 @@
 		else
 			dep->flags |= DWC3_EP_STALL;
 	} else {
-		if (!(dep->flags & DWC3_EP_STALL))
-			return 0;
 
 		ret = dwc3_send_clear_stall_ep_cmd(dep);
 		if (ret)
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index a2a9185..51ab794 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/usb/audio.h>
 #include <linux/wait.h>
+#include <linux/pm_qos.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
@@ -268,6 +269,8 @@
 	/* number of frames sent since start_time */
 	s64				frames_sent;
 	struct audio_source_config	*config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
 };
 
 static inline struct audio_dev *func_to_audio(struct usb_function *f)
@@ -740,6 +743,10 @@
 	runtime->hw.channels_max = 2;
 
 	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
 	return 0;
 }
 
@@ -749,6 +756,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	pm_qos_remove_request(&audio->pm_qos);
+
 	audio->substream = NULL;
 	spin_unlock_irqrestore(&audio->lock, flags);
 
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index aaa0fc2..af1bca6 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -47,6 +47,7 @@
 #define MTP_BULK_BUFFER_SIZE       16384
 #define INTR_BUFFER_SIZE           28
 #define MAX_INST_NAME_LEN          40
+#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX	0
@@ -837,7 +838,12 @@
 		if (hdr_size) {
 			/* prepend MTP data header */
 			header = (struct mtp_data_header *)req->buf;
-			header->length = __cpu_to_le32(count);
+			/*
+                         * set file size with header according to
+                         * MTP Specification v1.0
+                         */
+			header->length = (count > MTP_MAX_FILE_SIZE) ?
+				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
 			header->type = __cpu_to_le16(2); /* data packet */
 			header->command = __cpu_to_le16(dev->xfer_command);
 			header->transaction_id =
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e643c7..18dc18f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -315,7 +315,32 @@
 	struct page **pages;
 	pgoff_t next_index;
 	int nr_pages = 0;
-	int ret;
+	int got = 0;
+	int ret = 0;
+
+	if (!current->journal_info) {
+		/* caller of readpages does not hold buffer and read caps
+		 * (fadvise, madvise and readahead cases) */
+		int want = CEPH_CAP_FILE_CACHE;
+		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+		if (ret < 0) {
+			dout("start_read %p, error getting cap\n", inode);
+		} else if (!(got & want)) {
+			dout("start_read %p, no cache cap\n", inode);
+			ret = 0;
+		}
+		if (ret <= 0) {
+			if (got)
+				ceph_put_cap_refs(ci, got);
+			while (!list_empty(page_list)) {
+				page = list_entry(page_list->prev,
+						  struct page, lru);
+				list_del(&page->lru);
+				put_page(page);
+			}
+			return ret;
+		}
+	}
 
 	off = (u64) page_offset(page);
 
@@ -338,15 +363,18 @@
 				    CEPH_OSD_FLAG_READ, NULL,
 				    ci->i_truncate_seq, ci->i_truncate_size,
 				    false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
 
 	/* build page vector */
 	nr_pages = calc_pages_for(0, len);
 	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!pages)
-		goto out;
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
 	for (i = 0; i < nr_pages; ++i) {
 		page = list_entry(page_list->prev, struct page, lru);
 		BUG_ON(PageLocked(page));
@@ -379,6 +407,12 @@
 	if (ret < 0)
 		goto out_pages;
 	ceph_osdc_put_request(req);
+
+	/* After adding locked pages to page cache, the inode holds cache cap.
+	 * So we can drop our cap refs. */
+	if (got)
+		ceph_put_cap_refs(ci, got);
+
 	return nr_pages;
 
 out_pages:
@@ -387,8 +421,11 @@
 		unlock_page(pages[i]);
 	}
 	ceph_put_page_vector(pages, nr_pages, false);
-out:
+out_put:
 	ceph_osdc_put_request(req);
+out:
+	if (got)
+		ceph_put_cap_refs(ci, got);
 	return ret;
 }
 
@@ -425,7 +462,6 @@
 		rc = start_read(inode, page_list, max);
 		if (rc < 0)
 			goto out;
-		BUG_ON(rc == 0);
 	}
 out:
 	ceph_fscache_readpages_cancel(inode, page_list);
@@ -1372,9 +1408,11 @@
 	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
-	    ci->i_inline_version == CEPH_INLINE_NONE)
+	    ci->i_inline_version == CEPH_INLINE_NONE) {
+		current->journal_info = vma->vm_file;
 		ret = filemap_fault(vma, vmf);
-	else
+		current->journal_info = NULL;
+	} else
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f3f2110..03951f9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2479,6 +2479,27 @@
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
 
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+{
+	int ret, err = 0;
+
+	BUG_ON(need & ~CEPH_CAP_FILE_RD);
+	BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+	ret = ceph_pool_perm_check(ci, need);
+	if (ret < 0)
+		return ret;
+
+	ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+	if (ret) {
+		if (err == -EAGAIN) {
+			ret = 0;
+		} else if (err < 0) {
+			ret = err;
+		}
+	}
+	return ret;
+}
+
 /*
  * Wait for caps, and take cap references.  If we can't get a WR cap
  * due to a small max_size, make sure we check_max_size (and possibly
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index f995e35..ca3f630 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1249,8 +1249,9 @@
 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 		     ceph_cap_string(got));
-
+		current->journal_info = filp;
 		ret = generic_file_read_iter(iocb, to);
+		current->journal_info = NULL;
 	}
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3e3fa916..622d5dd 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -905,6 +905,8 @@
 
 extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			 loff_t endoff, int *got, struct page **pinned_page);
+extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+			     int need, int want, int *got);
 
 /* for counting open files by mode */
 extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 94661cf..b3830f7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -241,6 +241,7 @@
 	/* verify the message */
 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -1314,12 +1315,19 @@
 	void *callback_data;	  /* general purpose pointer for callback */
 	void *resp_buf;		/* pointer to received SMB header */
 	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command;		/* smb command code */
 	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request  */
 	bool multiEnd:1;	/* both received */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid         fid;
+	struct cifs_tcon        *tcon;
+	struct work_struct      work;
+};
+
 /*	Make code in transport.c a little cleaner by moving
 	update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1451,6 +1459,9 @@
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN		 0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e3fed92..586fdac 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1423,6 +1423,8 @@
 
 	length = discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1534,6 +1536,8 @@
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 893be07..b8015de 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -882,10 +882,19 @@
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3d38348..9730780 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -654,3 +654,47 @@
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 5d456eb..007abf7 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1565,6 +1565,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1645,6 +1646,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1727,6 +1729,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1815,6 +1818,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index f2d511a..04ef6e9 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@
 			      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					   __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+						__u64 ses_id, __u32  tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -158,6 +162,9 @@
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+					struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index bc9a7b6..390b0d0 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,22 +115,68 @@
 }
 
 static struct cifs_ses *
-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-		if (ses->Suid != smb2hdr->SessionId)
+		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
-	spin_unlock(&cifs_tcp_ses_lock);
 
 	return NULL;
 }
 
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
+	return NULL;
+}
+
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
 
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
@@ -142,7 +188,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
@@ -359,7 +405,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 206a597..cc26d41 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -727,9 +727,11 @@
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n",	midQ->mid);
 		send_cancel(ses->server, buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			cifs_small_buf_release(buf);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b938fa7..7ec77f8 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -40,6 +40,7 @@
 	short unsigned settime_flags;	/* to show in fdinfo */
 	struct rcu_head rcu;
 	struct list_head clist;
+	spinlock_t cancel_lock;
 	bool might_cancel;
 };
 
@@ -113,7 +114,7 @@
 	rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
 	if (ctx->might_cancel) {
 		ctx->might_cancel = false;
@@ -123,6 +124,13 @@
 	}
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+	spin_lock(&ctx->cancel_lock);
+	__timerfd_remove_cancel(ctx);
+	spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
 	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
@@ -133,6 +141,7 @@
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
 	     ctx->clockid == CLOCK_REALTIME_ALARM ||
 	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
@@ -143,9 +152,10 @@
 			list_add_rcu(&ctx->clist, &cancel_list);
 			spin_unlock(&cancel_lock);
 		}
-	} else if (ctx->might_cancel) {
-		timerfd_remove_cancel(ctx);
+	} else {
+		__timerfd_remove_cancel(ctx);
 	}
+	spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -403,6 +413,7 @@
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
+	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
 	if (isalarm(ctx)) {
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 24dd11e..91ea077 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -55,7 +55,6 @@
 #define DISP_CC_PLL0						38
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				39
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				40
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			41
 
 #define DISP_CC_MDSS_CORE_BCR					0
 #define DISP_CC_MDSS_GCC_CLOCKS_BCR				1
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index a31fa20..f3c3d1d 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -27,7 +27,5 @@
 #define RPMH_RF_CLK2_A						9
 #define RPMH_RF_CLK3						10
 #define RPMH_RF_CLK3_A						11
-#define RPMH_QDSS_CLK						12
-#define RPMH_QDSS_A_CLK						13
 
 #endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index ec7047c..0538291 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -41,13 +41,6 @@
 
 extern struct bus_type coresight_bustype;
 
-enum coresight_clk_rate {
-	CORESIGHT_CLK_RATE_OFF,
-	CORESIGHT_CLK_RATE_TRACE = 1000,
-	CORESIGHT_CLK_RATE_HSTRACE = 2000,
-	CORESIGHT_CLK_RATE_FIXED = 3000,
-};
-
 enum coresight_dev_type {
 	CORESIGHT_DEV_TYPE_NONE,
 	CORESIGHT_DEV_TYPE_SINK,
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 1c13cd2..0e4586f 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -2216,25 +2216,6 @@
 #if defined(CONFIG_THERMAL_QPNP_ADC_TM)				\
 			|| defined(CONFIG_THERMAL_QPNP_ADC_TM_MODULE)
 /**
- * qpnp_adc_tm_usbid_configure() - Configures Channel 0 of VADC_BTM to
- *		monitor USB_ID channel using 100k internal pull-up.
- *		USB driver passes the high/low voltage threshold along
- *		with the notification callback once the set thresholds
- *		are crossed.
- * @param:	Structure pointer of qpnp_adc_tm_usbid_param type.
- *		Clients pass the low/high voltage along with the threshold
- *		notification callback.
- */
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
-					struct qpnp_adc_tm_btm_param *param);
-/**
- * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 thats
- *		assigned for monitoring USB_ID. Disables the low/high
- *		threshold activation for channel 0 as well.
- * @param:	none.
- */
-int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
-/**
  * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to
  *		monitor the corresponding ADC channel for threshold detection.
  *		Driver passes the high/low voltage threshold along
diff --git a/include/sound/voice_svc.h b/include/sound/voice_svc.h
deleted file mode 100644
index 035053f..0000000
--- a/include/sound/voice_svc.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __VOICE_SVC_H__
-#define __VOICE_SVC_H__
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define VOICE_SVC_DRIVER_NAME "voice_svc"
-
-#define VOICE_SVC_MVM_STR "MVM"
-#define VOICE_SVC_CVS_STR "CVS"
-#define MAX_APR_SERVICE_NAME_LEN  64
-
-#define MSG_REGISTER 0x1
-#define MSG_REQUEST  0x2
-#define MSG_RESPONSE 0x3
-
-struct voice_svc_write_msg {
-	__u32 msg_type;
-	__u8 payload[0];
-};
-
-struct voice_svc_register {
-	char svc_name[MAX_APR_SERVICE_NAME_LEN];
-	__u32 src_port;
-	__u8 reg_flag;
-};
-
-struct voice_svc_cmd_response {
-	__u32 src_port;
-	__u32 dest_port;
-	__u32 token;
-	__u32 opcode;
-	__u32 payload_size;
-	__u8 payload[0];
-};
-
-struct voice_svc_cmd_request {
-	char svc_name[MAX_APR_SERVICE_NAME_LEN];
-	__u32 src_port;
-	__u32 dest_port;
-	__u32 token;
-	__u32 opcode;
-	__u32 payload_size;
-	__u8 payload[0];
-};
-
-#endif
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index b0350f0..27e9ef8 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -18,7 +18,6 @@
 header-y += audio_slimslave.h
 header-y += voice_params.h
 header-y += audio_effects.h
-header-y += voice_svc.h
 header-y += devdep_params.h
 header-y += msmcal-hwdep.h
 header-y += wcd-dsp-glink.h
diff --git a/init/Kconfig b/init/Kconfig
index 007186d..2c382dc1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -795,19 +795,6 @@
 
 endchoice
 
-config RCU_EXPEDITE_BOOT
-	bool
-	default n
-	help
-	  This option enables expedited grace periods at boot time,
-	  as if rcu_expedite_gp() had been invoked early in boot.
-	  The corresponding rcu_unexpedite_gp() is invoked from
-	  rcu_end_inkernel_boot(), which is intended to be invoked
-	  at the end of the kernel-only boot sequence, just before
-	  init is exec'ed.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config BUILD_BIN2C
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 30e0107..3c32c74 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -29,7 +29,6 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
-CONFIG_INET6_DIAG_DESTROY=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
@@ -72,7 +71,6 @@
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_TPROXY=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -173,5 +171,4 @@
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1d203e1..21a8764 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1488,14 +1488,12 @@
 	/* (Un)Install the callbacks for further cpu hotplug operations */
 	struct cpuhp_step *sp;
 
-	mutex_lock(&cpuhp_state_mutex);
 	sp = cpuhp_get_step(state);
 	sp->startup.single = startup;
 	sp->teardown.single = teardown;
 	sp->name = name;
 	sp->multi_instance = multi_instance;
 	INIT_HLIST_HEAD(&sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 }
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@@ -1565,16 +1563,13 @@
 {
 	enum cpuhp_state i;
 
-	mutex_lock(&cpuhp_state_mutex);
 	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
 		if (cpuhp_ap_states[i].name)
 			continue;
 
 		cpuhp_ap_states[i].name = "Reserved";
-		mutex_unlock(&cpuhp_state_mutex);
 		return i;
 	}
-	mutex_unlock(&cpuhp_state_mutex);
 	WARN(1, "No more dynamic states available for CPU hotplug\n");
 	return -ENOSPC;
 }
@@ -1591,6 +1586,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (!invoke || !sp->startup.multi)
 		goto add_node;
@@ -1615,11 +1611,10 @@
 	}
 add_node:
 	ret = 0;
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_add_head(node, &sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 
 err:
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 	return ret;
 }
@@ -1648,6 +1643,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	/* currently assignments for the ONLINE state are possible */
 	if (state == CPUHP_AP_ONLINE_DYN) {
@@ -1683,6 +1679,8 @@
 		}
 	}
 out:
+	mutex_unlock(&cpuhp_state_mutex);
+
 	put_online_cpus();
 	if (!ret && dyn_state)
 		return state;
@@ -1702,6 +1700,8 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
+
 	if (!invoke || !cpuhp_get_teardown_cb(state))
 		goto remove;
 	/*
@@ -1718,7 +1718,6 @@
 	}
 
 remove:
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_del(node);
 	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
@@ -1743,6 +1742,7 @@
 	BUG_ON(cpuhp_cb_check(state));
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (sp->multi_instance) {
 		WARN(!hlist_empty(&sp->list),
@@ -1768,6 +1768,7 @@
 	}
 remove:
 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4f6db7e..9e03db9 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -132,8 +132,7 @@
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
-static atomic_t rcu_expedited_nesting =
-	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
+static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
 
 /*
  * Should normal grace-period primitives be expedited?  Intended for
@@ -182,8 +181,7 @@
  */
 void rcu_end_inkernel_boot(void)
 {
-	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
-		rcu_unexpedite_gp();
+	rcu_unexpedite_gp();
 	if (rcu_normal_after_boot)
 		WRITE_ONCE(rcu_normal, 1);
 }
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 449e4a3..c3f8005 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -58,6 +58,8 @@
 	(5490 - 5590 @ 80), (36)
 	(5650 - 5730 @ 80), (36)
 	(5735 - 5835 @ 80), (36)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40), NO-OUTDOOR
 
 country AS: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -91,6 +93,9 @@
 	(5650 - 5730 @ 80), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (43), NO-OUTDOOR
+
 country AW: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -107,8 +112,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country BB: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -135,7 +138,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57240 - 65880 @ 2160), (40)
 
 country BF: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -190,6 +193,9 @@
 	(5250 - 5330 @ 80), (30), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country BR: DFS-FCC
 	(2402 - 2482 @ 40), (30)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -197,6 +203,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
 country BS: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -230,6 +239,9 @@
 	(5650 - 5730 @ 80), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
 country CF: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 40), (24)
@@ -252,6 +264,9 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country CI: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -264,16 +279,16 @@
 	(5170 - 5330 @ 160), (20)
 	(5735 - 5835 @ 80), (20)
 
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (50), NO-OUTDOOR
+
 country CN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5735 - 5835 @ 80), (33)
-	# 60 gHz band channels 1,4: 28dBm, channels 2,3: 44dBm
-	# ref: http://www.miit.gov.cn/n11293472/n11505629/n11506593/n11960250/n11960606/n11960700/n12330791.files/n12330790.pdf
-	(57240 - 59400 @ 2160), (28)
+	# 60 gHz band channels 2,3: 44dBm
 	(59400 - 63720 @ 2160), (44)
-	(63720 - 65880 @ 2160), (28)
 
 country CO: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -289,6 +304,9 @@
 	(5490 - 5730 @ 20), (24), DFS
 	(5735 - 5835 @ 20), (30)
 
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (30)
+
 country CX: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -397,6 +415,9 @@
 	(5490 - 5730 @ 20), (24), DFS
 	(5735 - 5835 @ 20), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country EE: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -511,8 +532,6 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (18), AUTO-BW
 	(5250 - 5330 @ 80), (18), DFS, AUTO-BW
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country GF: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -569,6 +588,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country GY:
 	(2402 - 2482 @ 40), (30)
 	(5735 - 5835 @ 80), (30)
@@ -580,12 +602,18 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4, ref: FCC/EU
+	(57240 - 65880 @ 2160), (40)
+
 country HN:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (24)
 	(5490 - 5730 @ 160), (24)
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country HR: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -637,7 +665,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	# 5.9ghz band
 	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
 	(5850 - 5870 @ 10), (30)
@@ -647,7 +674,7 @@
 	(5890 - 5910 @ 10), (30)
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
-        # 60 gHz band channels 1-4, ref: Etsi En 302 567
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country IL: DFS-ETSI
@@ -655,6 +682,9 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 
+	# 60 gHz band channels 1-4, base on Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country IN:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (23)
@@ -705,20 +735,25 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country JO:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23)
 	(5735 - 5835 @ 80), (23)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country JP: DFS-JP
 	(2402 - 2482 @ 40), (20)
 	(2474 - 2494 @ 20), (20), NO-OFDM
 	(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
 	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
 	(5490 - 5710 @ 160), (20), DFS
-	# 60 GHz band channels 2-4 at 10mW,
-	# ref: http://www.arib.or.jp/english/html/overview/doc/1-STD-T74v1_1.pdf
-	(59000 - 66000 @ 2160), (10 mW)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country KE: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -747,7 +782,7 @@
 	(5735 - 5835 @ 80), (30)
 	# 60 GHz band channels 1-4,
 	# ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
-	(57000 - 66000 @ 2160), (43)
+	(57240 - 65880 @ 2160), (43)
 
 country KP: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -800,6 +835,9 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country LK: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 20), (24)
@@ -861,6 +899,7 @@
 	(5890 - 5910 @ 10), (30)
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
+
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
@@ -869,6 +908,9 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country MC: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -905,8 +947,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country MN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -984,6 +1024,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country MY: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -991,6 +1034,9 @@
 	(5490 - 5650 @ 160), (24), DFS
 	(5735 - 5815 @ 80), (24)
 
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
+
 country NA: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1010,6 +1056,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country NL: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1056,6 +1105,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country OM: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1095,6 +1147,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country PK:
 	(2402 - 2482 @ 40), (30)
 	(5735 - 5835 @ 80), (30)
@@ -1167,6 +1222,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country QA:
 	(2402 - 2482 @ 40), (20)
 	(5735 - 5835 @ 80), (30)
@@ -1210,6 +1268,9 @@
 	(5490 - 5730 @ 160), (30)
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
 country RW: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -1247,6 +1308,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country SI: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1328,6 +1392,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
 country TN: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1338,8 +1405,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country TT:
 	(2402 - 2482 @ 40), (20)
@@ -1347,6 +1412,9 @@
 	(5490 - 5730 @ 160), (36)
 	(5735 - 5835 @ 80), (36)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country TW: DFS-FCC
 	(2402 - 2472 @ 40), (30)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -1354,6 +1422,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
+
 country TZ:
 	(2402 - 2482 @ 40), (20)
 	(5735 - 5835 @ 80), (30)
@@ -1372,7 +1443,7 @@
 	(5490 - 5670 @ 160), (20), DFS
 	(5735 - 5835 @ 80), (20)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57240 - 65880 @ 2160), (20)
 
 country UG: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1401,8 +1472,8 @@
 	(5910 - 5930 @ 10), (30)
 	# 60g band
 	# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
-	# channels 1,2,3, EIRP=40dBm(43dBm peak)
-	(57240 - 63720 @ 2160), (40)
+	# channels 1,2,3,4,5,6, EIRP=40dBm(43dBm peak)
+	(57240 - 70200 @ 2160), (40)
 
 country UY: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1410,6 +1481,9 @@
         (5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
 country UZ: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1441,6 +1515,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
+
 country VU: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -1466,7 +1543,6 @@
 	(5170 - 5250 @ 80), (20), NO-IR, AUTO-BW, NO-OUTDOOR
 	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
 	(5490 - 5710 @ 160), (20), DFS
-	(59000 - 66000 @ 2160), (10 mW)
 
 country YE:
 	(2402 - 2482 @ 40), (20)
@@ -1484,6 +1560,9 @@
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
 country ZW: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW