Merge "defconfig: arm64: sdm845: Enable config IP_NF_MATCH_RPFILTER" into msm-4.9
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index d28f7ba..2a8d506 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -6,6 +6,13 @@
 INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
 endif
 
+TARGET_KERNEL_MAKE_ENV := $(strip $(TARGET_KERNEL_MAKE_ENV))
+ifeq ($(TARGET_KERNEL_MAKE_ENV),)
+KERNEL_MAKE_ENV :=
+else
+KERNEL_MAKE_ENV := $(TARGET_KERNEL_MAKE_ENV)
+endif
+
 TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH))
 ifeq ($(TARGET_KERNEL_ARCH),)
 KERNEL_ARCH := arm
@@ -88,8 +95,8 @@
 endif
 
 KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr
-KERNEL_MODULES_INSTALL := system
-KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules
+KERNEL_MODULES_INSTALL ?= system
+KERNEL_MODULES_OUT ?= $(PRODUCT_OUT)/$(KERNEL_MODULES_INSTALL)/lib/modules
 
 TARGET_PREBUILT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)
 
@@ -121,26 +128,26 @@
 	mkdir -p $(KERNEL_OUT)
 
 $(KERNEL_CONFIG): $(KERNEL_OUT)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
 	$(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
 			echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
 
 $(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
 	$(hide) echo "Building kernel..."
 	$(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
 	$(mv-modules)
 	$(clean-module-folder)
 
 $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
 	$(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
 			rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;\
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;\
 			if [ -d "$(KERNEL_HEADERS_INSTALL)/include/bringup_headers" ]; then \
 				cp -Rf  $(KERNEL_HEADERS_INSTALL)/include/bringup_headers/* $(KERNEL_HEADERS_INSTALL)/include/ ;\
 			fi ;\
@@ -148,20 +155,20 @@
 	$(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \
 			echo "Used a different defconfig for header generation"; \
 			rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi
 	$(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
 			echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
 
 kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
 
 kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG)
 	env KCONFIG_NOTIMESTAMP=true \
-	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
+	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
 	env KCONFIG_NOTIMESTAMP=true \
-	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) savedefconfig
+	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) savedefconfig
 	cp $(KERNEL_OUT)/defconfig $(TARGET_KERNEL_SOURCE)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG)
 
 endif
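
Reviewer note: the new KERNEL_MAKE_ENV plumbing forwards a board-supplied TARGET_KERNEL_MAKE_ENV (extra VAR=value assignments, e.g. an external dtc override) into every kernel make invocation below, and the switch from := to ?= lets a board config pre-set KERNEL_MODULES_INSTALL and KERNEL_MODULES_OUT before this file is parsed. The new default of $(PRODUCT_OUT)/$(KERNEL_MODULES_INSTALL)/lib/modules is equivalent to the old $(TARGET_OUT)/lib/modules when the install partition stays "system".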
diff --git a/Makefile b/Makefile
index 06a55b5..f834951 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 26
+SUBLEVEL = 27
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1f6d2cc..bcef117 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -208,6 +208,7 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_THERMAL=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 5d61163..5601276 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -199,6 +199,7 @@
 CONFIG_MSM_CDC_PINCTRL=y
 CONFIG_MSM_CDC_SUPPLY=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 CONFIG_SOUND=y
 CONFIG_SND=y
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b861876..84867ba 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1105,6 +1105,11 @@
 	  Space separated list of names of dtbs to append when
 	  building a concatenated Image.gz-dtb.
 
+config BUILD_ARM64_DT_OVERLAY
+	bool "enable DT overlay compilation support"
+	depends on OF
+	help
+	  This option enables support for DT overlay compilation.
 endmenu
 
 menu "Userspace binary formats"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe7..3d9d6f3 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -62,6 +62,9 @@
 
 	  If in doubt, say N.
 
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+	bool "Enforce strict break-before-make on page table updates "
+
 source "drivers/hwtracing/coresight/Kconfig"
 
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 13a64c9..1570602 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -103,6 +103,10 @@
 
 KBUILD_DTBS	:= dtbs
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
 
 boot := arch/arm64/boot
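
Reviewer note: dtc's -@ flag makes the compiler emit a __symbols__ node into each dtb, which is what lets a device tree overlay resolve phandle references against the base tree at apply time; since the extra node grows every dtb, the behaviour is gated behind the new CONFIG_BUILD_ARM64_DT_OVERLAY option rather than enabled unconditionally.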
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 69dfe46..fcc09a0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -34,6 +34,7 @@
 	sound-tavil {
 		compatible = "qcom,sdm845-asoc-snd-tavil";
 		qcom,model = "sdm845-tavil-snd-card";
+		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
 		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 3ab0c70..947262fb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -458,6 +458,58 @@
 			};
 		};
 
+		sde_dp_aux_active: sde_dp_aux_active {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-disable = <0>; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		sde_dp_aux_suspend: sde_dp_aux_suspend {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		sde_dp_usbplug_cc_active: sde_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		sde_dp_usbplug_cc_suspend: sde_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index e21ed36..4a8d06d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -11,6 +11,7 @@
  */
 
 #include "smb1355.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 /{
 	qrd_batterydata: qcom,battery-data {
@@ -18,6 +19,34 @@
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
 		#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
 	};
+
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&qupv3_se6_4uart {
+	status = "ok";
 };
 
 &pmi8998_fg {
@@ -39,3 +68,53 @@
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
 	};
 };
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&ufsphy_card {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_card {
+	vdd-hba-supply = <&ufs_card_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l21>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <300000>;
+	vccq2-max-microamp = <300000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
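
Reviewer note: on the QRD board this wires up serial/SPI/I2C aliases for the QUPv3 serial engines, enables those engines, and brings up both UFS instances (embedded "mem" and removable "card") with PHY and controller supplies mirroring the existing MTP configuration. The dt-bindings/gpio/gpio.h include only pulls in the standard GPIO polarity macros, presumably for follow-up board additions; nothing in this hunk uses them yet.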
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 72c2efa..95c1d65 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -821,6 +821,13 @@
 		interrupts = <1 5 4>;
 	};
 
+	clock_rpmh: qcom,rpmhclk {
+		compatible = "qcom,rpmh-clk-sdm845";
+		#clock-cells = <1>;
+		mboxes = <&apps_rsc 0>;
+		mbox-names = "apps";
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdm845", "syscon";
 		reg = <0x100000 0x1f0000>;
@@ -995,13 +1002,6 @@
 		#reset-cells = <1>;
 	};
 
-	clock_rpmh: qcom,rpmhclk {
-		compatible = "qcom,rpmh-clk-sdm845";
-		#clock-cells = <1>;
-		mboxes = <&apps_rsc 0>;
-		mbox-names = "apps";
-	};
-
 	clock_debug: qcom,cc-debug@100000 {
 		compatible = "qcom,debugcc-sdm845";
 		qcom,cc-count = <5>;
@@ -1678,6 +1678,11 @@
 			compatible = "qcom,msm-imem-kaslr_offset";
 			reg = <0x6d0 12>;
 		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
 	};
 
 	qcom,venus@aae0000 {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 58bc0ba..fe3eff2 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -215,6 +215,7 @@
 CONFIG_NET_ACT_MIRRED=y
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index bb55086..7d7f6f6 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -224,6 +224,7 @@
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffbb9a5..875545d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
 #include <asm/bug.h>
 #include <asm/proc-fns.h>
 
+#include <asm/bug.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable-prot.h>
@@ -172,6 +173,34 @@
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+	pteval_t old = pte_val(*ptep);
+	pteval_t new = pte_val(pte);
+
+	/* Only problematic if valid -> valid */
+	if (!(old & new & PTE_VALID))
+		goto pte_ok;
+
+	/* Changing attributes should go via an invalid entry */
+	if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+		goto pte_bad;
+
+	/* Change of OA is only an issue if one mapping is writable */
+	if (!(old & new & PTE_RDONLY) &&
+	    WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+		goto pte_bad;
+
+	goto pte_ok;
+
+pte_bad:
+	*ptep = __pte(0);
+	dsb(ishst);
+	asm("tlbi	vmalle1is");
+	dsb(ish);
+	isb();
+pte_ok:
+#endif
+
 	*ptep = pte;
 
 	/*
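
Reviewer note: with CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE, set_pte() now polices the architecture's break-before-make rule: a valid (live) PTE must not be rewritten in place with different memory attributes, nor with a different output address while either mapping is writable. On a violation it installs an invalid entry and performs a full TLB invalidate, i.e. it forcibly executes the "break" step the caller skipped, then proceeds with the write. A standalone sketch of just the violation predicate, using hypothetical stand-in values for the PTE field masks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the arm64 PTE fields. */
    #define PTE_VALID          (1ULL << 0)
    #define PTE_ATTRINDX_MASK  (7ULL << 2)
    #define PTE_RDONLY         (1ULL << 7)
    #define PTE_PFN_MASK       0x0000fffffffff000ULL

    /* True if replacing 'old' with 'new' in place breaks break-before-make. */
    static bool bbm_violation(uint64_t old, uint64_t new)
    {
        if (!(old & new & PTE_VALID))
            return false;  /* only live -> live rewrites are problematic */
        if ((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK))
            return true;   /* attribute change on a live mapping */
        if (!(old & new & PTE_RDONLY) &&
            (old & PTE_PFN_MASK) != (new & PTE_PFN_MASK))
            return true;   /* output address change, one side writable */
        return false;
    }

    int main(void)
    {
        uint64_t live  = PTE_VALID | (1ULL << 12);  /* writable, pfn 1 */
        uint64_t moved = PTE_VALID | (2ULL << 12);  /* writable, pfn 2 */

        printf("violation: %d\n", bbm_violation(live, moved));  /* 1 */
        return 0;
    }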
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0c4a5ee..da845fd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -189,7 +189,7 @@
 	 * don't attempt to dump non-kernel addresses or
 	 * values that are probably just small negative numbers
 	 */
-	if (addr < PAGE_OFFSET || addr > -256UL)
+	if (addr < KIMAGE_VADDR || addr > -256UL)
 		return;
 
 	printk("\n%s: %#lx:\n", name, addr);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 40e775a..837bbab 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1953,20 +1953,32 @@
  *
  * Attaches specified io address space mapping to the provided device,
  * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. More than one client might be attached to
- * the same io address space mapping.
+ * IOMMU aware version. Only one device in an iommu_group may use this
+ * function.
  */
 int arm_iommu_attach_device(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
 	int err;
 	int s1_bypass = 0, is_fast = 0;
+	struct iommu_group *group;
+
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return -ENODEV;
+	}
+
+	if (iommu_get_domain_for_dev(dev)) {
+		dev_err(dev, "Device already attached to other iommu_domain\n");
+		return -EINVAL;
+	}
 
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
 	if (is_fast)
 		return fast_smmu_attach_device(dev, mapping);
 
-	err = iommu_attach_device(mapping->domain, dev);
+	err = iommu_attach_group(mapping->domain, group);
 	if (err)
 		return err;
 
@@ -1994,6 +2006,7 @@
 {
 	struct dma_iommu_mapping *mapping;
 	int is_fast, s1_bypass = 0;
+	struct iommu_group *group;
 
 	mapping = to_dma_iommu_mapping(dev);
 	if (!mapping) {
@@ -2013,7 +2026,13 @@
 	if (msm_dma_unmap_all_for_dev(dev))
 		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
 
-	iommu_detach_device(mapping->domain, dev);
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return;
+	}
+
+	iommu_detach_group(mapping->domain, group);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	dev->archdata.mapping = NULL;
 	if (!s1_bypass)
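
Reviewer note: attaching by iommu_group rather than by device matches the IOMMU core's ownership model: every device in a group shares one domain, so a per-device attach could silently re-route the group's other members. The new iommu_get_domain_for_dev() check also makes a second attach fail loudly instead of corrupting state. The client-side pattern is unchanged; a hedged sketch, assuming this msm-4.9 arm64 port keeps the 32-bit ARM signatures from asm/dma-iommu.h:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/sizes.h>
    #include <asm/dma-iommu.h>

    /* Sketch: give 'dev' a 1 GiB IOVA window starting at 0x10000000. */
    static int example_setup_iommu(struct device *dev)
    {
        struct dma_iommu_mapping *mapping;
        int ret;

        mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_1G);
        if (IS_ERR(mapping))
            return PTR_ERR(mapping);

        /* Now fails with -EINVAL if the group already has a domain. */
        ret = arm_iommu_attach_device(dev, mapping);
        if (ret)
            arm_iommu_release_mapping(mapping);

        return ret;
    }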
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index b18a172..ee847d9f 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -306,7 +306,6 @@
 		.subsys = "slpi",
 		.link.link_info.edge = "dsps",
 		.link.link_info.transport = "smem",
-		.vmid = VMID_SSC_Q6,
 	},
 	{
 		.name = "cdsprpc-smd",
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149c..d0ac2d5 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -489,8 +489,7 @@
 int tpm_get_timeouts(struct tpm_chip *chip)
 {
 	struct tpm_cmd_t tpm_cmd;
-	unsigned long new_timeout[4];
-	unsigned long old_timeout[4];
+	unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
 	struct duration_t *duration_cap;
 	ssize_t rc;
 
@@ -542,11 +541,15 @@
 	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
 		return -EINVAL;
 
-	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
-	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
-	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
-	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
-	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+	timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+	timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+	timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+	timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+	timeout_chip[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+	timeout_chip[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+	timeout_chip[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+	timeout_chip[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+	memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
 
 	/*
 	 * Provide ability for vendor overrides of timeout values in case
@@ -554,16 +557,24 @@
 	 */
 	if (chip->ops->update_timeouts != NULL)
 		chip->timeout_adjusted =
-			chip->ops->update_timeouts(chip, new_timeout);
+			chip->ops->update_timeouts(chip, timeout_eff);
 
 	if (!chip->timeout_adjusted) {
-		/* Don't overwrite default if value is 0 */
-		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
-			int i;
+		/* Restore default if chip reported 0 */
+		int i;
 
+		for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+			if (timeout_eff[i])
+				continue;
+
+			timeout_eff[i] = timeout_old[i];
+			chip->timeout_adjusted = true;
+		}
+
+		if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
 			/* timeouts in msec rather usec */
-			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
-				new_timeout[i] *= 1000;
+			for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+				timeout_eff[i] *= 1000;
 			chip->timeout_adjusted = true;
 		}
 	}
@@ -572,16 +583,16 @@
 	if (chip->timeout_adjusted) {
 		dev_info(&chip->dev,
 			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
-			 old_timeout[0], new_timeout[0],
-			 old_timeout[1], new_timeout[1],
-			 old_timeout[2], new_timeout[2],
-			 old_timeout[3], new_timeout[3]);
+			 timeout_chip[0], timeout_eff[0],
+			 timeout_chip[1], timeout_eff[1],
+			 timeout_chip[2], timeout_eff[2],
+			 timeout_chip[3], timeout_eff[3]);
 	}
 
-	chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
-	chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
-	chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
-	chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
+	chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+	chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+	chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+	chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
 
 duration:
 	tpm_cmd.header.in = tpm_getcap_header;
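
Reviewer note: the rework keeps three arrays: timeout_old (the driver's current values, converted to usecs), timeout_chip (what the TPM reported), and timeout_eff (what will actually be used), so the adjustment log can show chip-reported versus effective values. Two fixups then apply: a reported 0 falls back to the existing default, and if timeout A looks like milliseconds (non-zero but below 1000) all four values are scaled to usecs. A standalone sketch of that fixup logic:

    #include <stddef.h>
    #include <stdio.h>

    #define N 4

    /* Apply the tpm_get_timeouts() fixups; values are in microseconds. */
    static void fix_timeouts(unsigned long eff[N], const unsigned long old[N])
    {
        size_t i;

        for (i = 0; i < N; i++)
            if (eff[i] == 0)
                eff[i] = old[i];   /* chip reported 0: restore default */

        if (eff[0] != 0 && eff[0] < 1000)  /* looks like msec, not usec */
            for (i = 0; i < N; i++)
                eff[i] *= 1000;
    }

    int main(void)
    {
        unsigned long old[N] = { 750000, 2000000, 750000, 750000 };
        unsigned long eff[N] = { 0, 2000000, 750000, 750000 };  /* A == 0 */
        size_t i;

        fix_timeouts(eff, old);
        for (i = 0; i < N; i++)
            printf("timeout %c = %luus\n", (int)('A' + i), eff[i]);
        return 0;
    }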
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 51a5e0b..5c4ddcc 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -343,6 +343,72 @@
 };
 EXPORT_SYMBOL_GPL(clk_branch2_ops);
 
+static int clk_branch2_hw_ctl_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	if (!(hw->init->flags & CLK_SET_RATE_PARENT)) {
+		pr_err("SET_RATE_PARENT flag needs to be set for %s\n",
+					clk_hw_get_name(hw));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+static int clk_branch2_hw_ctl_determine_rate(struct clk_hw *hw,
+		struct clk_rate_request *req)
+{
+	struct clk_hw *clkp;
+
+	clkp = clk_hw_get_parent(hw);
+	if (!clkp)
+		return -EINVAL;
+
+	req->best_parent_hw = clkp;
+	req->best_parent_rate = clk_round_rate(clkp->clk, req->rate);
+
+	return 0;
+}
+
+static int clk_branch2_hw_ctl_enable(struct clk_hw *hw)
+{
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+
+	/* The parent branch clock should have been prepared prior to this. */
+	if (!parent || !clk_hw_is_prepared(parent))
+		return -EINVAL;
+
+	return clk_enable_regmap(hw);
+}
+
+static void clk_branch2_hw_ctl_disable(struct clk_hw *hw)
+{
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+
+	if (!parent)
+		return;
+
+	clk_disable_regmap(hw);
+}
+
+const struct clk_ops clk_branch2_hw_ctl_ops = {
+	.enable = clk_branch2_hw_ctl_enable,
+	.disable = clk_branch2_hw_ctl_disable,
+	.is_enabled = clk_is_enabled_regmap,
+	.set_rate = clk_branch2_hw_ctl_set_rate,
+	.recalc_rate = clk_branch2_hw_ctl_recalc_rate,
+	.determine_rate = clk_branch2_hw_ctl_determine_rate,
+	.set_flags = clk_branch_set_flags,
+	.list_registers = clk_branch2_list_registers,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_hw_ctl_ops);
+
 static int clk_gate_toggle(struct clk_hw *hw, bool en)
 {
 	struct clk_gate2 *gt = to_clk_gate2(hw);
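
Reviewer note: these ops back the *_hw_ctl_clk branches added in the gcc-sdm845.c hunk below. Each hw_ctl clock shares its parent branch's register but uses enable_mask BIT(1), the bit that hands gating control to hardware (e.g. the UFS controller), and its rate ops simply track the parent, which must already be prepared when the hw_ctl branch is enabled. A hedged consumer sketch with hypothetical clock names showing the expected ordering:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /*
     * Sketch: hand gating control to hardware for a UFS ICE core clock.
     * "ice_core_clk" and "ice_core_clk_hw_ctl" are hypothetical names.
     */
    static int example_enable_hw_ctl(struct device *dev)
    {
        struct clk *branch = devm_clk_get(dev, "ice_core_clk");
        struct clk *hw_ctl = devm_clk_get(dev, "ice_core_clk_hw_ctl");
        int ret;

        if (IS_ERR(branch) || IS_ERR(hw_ctl))
            return -ENODEV;

        /* The hw_ctl enable op rejects an unprepared parent branch. */
        ret = clk_prepare_enable(branch);
        if (ret)
            return ret;

        ret = clk_prepare_enable(hw_ctl);
        if (ret)
            clk_disable_unprepare(branch);

        return ret;
    }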
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 51209ea..f0fb6d5 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -64,6 +64,7 @@
 
 extern const struct clk_ops clk_branch_ops;
 extern const struct clk_ops clk_branch2_ops;
+extern const struct clk_ops clk_branch2_hw_ctl_ops;
 extern const struct clk_ops clk_gate2_ops;
 extern const struct clk_ops clk_branch_simple_ops;
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 2f9cfdf..3d101ac 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1064,17 +1064,10 @@
 }
 
 static int clk_dp_determine_rate(struct clk_hw *hw,
-				struct clk_rate_request *req)
+		struct clk_rate_request *req)
 {
-	if (!hw)
-		return -EINVAL;
-
-	if (!clk_hw_get_parent(hw)) {
-		pr_err("Missing the parent for the DP RCG\n");
-		return -EINVAL;
-	}
-
-	req->best_parent_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+	req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
+							req->best_parent_rate);
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 3b56fa1..d3a28e6 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -80,8 +80,8 @@
 
 static const char * const disp_cc_parent_names_1[] = {
 	"bi_tcxo",
-	"dp_phy_pll_link_clk",
-	"dp_phy_pll_vco_div_clk",
+	"dp_link_clk_divsel_ten",
+	"dp_vco_divided_clk_src_mux",
 	"core_bi_pll_test_se",
 };
 
@@ -217,12 +217,11 @@
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
-	F( 108000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 180000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 360000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 540000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 108000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 180000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 360000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
 	{ }
 };
 
@@ -236,23 +235,22 @@
 		.name = "disp_cc_mdss_dp_crypto_clk_src",
 		.parent_names = disp_cc_parent_names_1,
 		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 12800000,
-			LOWER, 108000000,
-			LOW, 180000000,
-			LOW_L1, 360000000,
-			NOMINAL, 540000000),
+			MIN, 12800,
+			LOWER, 108000,
+			LOW, 180000,
+			LOW_L1, 360000,
+			NOMINAL, 540000),
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
-	F_SLEW( 162000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  324000000),
-	F_SLEW( 270000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  540000000),
-	F_SLEW( 540000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1080000000),
-	F_SLEW( 810000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1620000000),
+	F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 810000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
 	{ }
 };
 
@@ -269,11 +267,11 @@
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 19200000,
-			LOWER, 162000000,
-			LOW, 270000000,
-			LOW_L1, 540000000,
-			NOMINAL, 810000000),
+			MIN, 19200,
+			LOWER, 162000,
+			LOW, 270000,
+			LOW_L1, 540000,
+			NOMINAL, 810000),
 	},
 };
 
@@ -284,17 +282,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel1_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -305,17 +301,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -664,23 +658,7 @@
 	},
 };
 
-static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
-	.reg = 0x2150,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_link_div_clk_src",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
 static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
 	.halt_reg = 0x2044,
 	.halt_check = BRANCH_HALT,
@@ -690,10 +668,10 @@
 		.hw.init = &(struct clk_init_data){
 			.name = "disp_cc_mdss_dp_link_intf_clk",
 			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_div_clk_src",
+				"disp_cc_mdss_dp_link_clk_src",
 			},
 			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.flags = CLK_GET_RATE_NOCACHE,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -955,8 +933,6 @@
 					&disp_cc_mdss_dp_crypto_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
-	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
-					&disp_cc_mdss_dp_link_div_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
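
Reviewer note: the DP frequency tables and FMAX maps change units from Hz to kHz (162000000 becomes 162000, and so on) to match the rates the DP driver requests, the crypto and link sources drop the CLK_SET_RATE_PARENT/F_SLEW handling in favour of plain F() entries, and disp_cc_mdss_dp_link_div_clk_src is removed entirely: disp_cc_mdss_dp_link_intf_clk now hangs directly off the link RCG and relies on the divider's power-on default of 0x3 (div-4), as the replacement comment records. The pixel clock sources also switch from a single hard-wired parent to the full disp_cc_parent_names_1 table so the dp_vco_divided_clk_src_mux parent can be selected by index.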
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 678dd10..19041e7 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -889,7 +889,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_gp1_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_axi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -917,7 +917,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_ice_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -937,6 +937,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_4,
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_4,
@@ -961,7 +962,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_unipro_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -990,7 +991,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_axi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1011,7 +1012,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_ice_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1031,6 +1032,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_4,
 	.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_4,
@@ -1048,6 +1050,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_unipro_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1225,6 +1228,23 @@
 	},
 };
 
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+	.halt_reg = 0x82028,
+	.clkr = {
+		.enable_reg = 0x82028,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_aggre_ufs_card_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
 	.halt_reg = 0x82024,
 	.halt_check = BRANCH_HALT,
@@ -1243,6 +1263,23 @@
 	},
 };
 
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x82024,
+	.clkr = {
+		.enable_reg = 0x82024,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_aggre_ufs_phy_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
 	.halt_reg = 0x8201c,
 	.halt_check = BRANCH_HALT,
@@ -2575,6 +2612,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+	.halt_reg = 0x7500c,
+	.clkr = {
+		.enable_reg = 0x7500c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_card_clkref_clk = {
 	.halt_reg = 0x8c004,
 	.halt_check = BRANCH_HALT,
@@ -2606,6 +2660,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+	.halt_reg = 0x75058,
+	.clkr = {
+		.enable_reg = 0x75058,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_ice_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_card_phy_aux_clk = {
 	.halt_reg = 0x7508c,
 	.halt_check = BRANCH_HALT,
@@ -2624,6 +2695,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+	.halt_reg = 0x7508c,
+	.clkr = {
+		.enable_reg = 0x7508c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_phy_aux_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_gate2 gcc_ufs_card_rx_symbol_0_clk = {
 	.udelay = 500,
 	.clkr = {
@@ -2678,6 +2766,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+	.halt_reg = 0x75054,
+	.clkr = {
+		.enable_reg = 0x75054,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_unipro_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_mem_clkref_clk = {
 	.halt_reg = 0x8c000,
 	.halt_check = BRANCH_HALT,
@@ -2722,6 +2827,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x7700c,
+	.clkr = {
+		.enable_reg = 0x7700c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_phy_ice_core_clk = {
 	.halt_reg = 0x77058,
 	.halt_check = BRANCH_HALT,
@@ -2740,6 +2862,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+	.halt_reg = 0x77058,
+	.clkr = {
+		.enable_reg = 0x77058,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
 	.halt_reg = 0x7708c,
 	.halt_check = BRANCH_HALT,
@@ -2758,6 +2897,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+	.halt_reg = 0x7708c,
+	.clkr = {
+		.enable_reg = 0x7708c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_gate2 gcc_ufs_phy_rx_symbol_0_clk = {
 	.udelay = 500,
 	.clkr = {
@@ -2812,6 +2968,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+	.halt_reg = 0x77054,
+	.clkr = {
+		.enable_reg = 0x77054,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_usb30_prim_master_clk = {
 	.halt_reg = 0xf00c,
 	.halt_check = BRANCH_HALT,
@@ -3094,7 +3267,11 @@
 static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+	[GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] =
+				&gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
 	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+	[GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] =
+				&gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
 	[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
 	[GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
 	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
@@ -3212,30 +3389,43 @@
 	[GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
 	[GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
 	[GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+	[GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
 	[GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
 	[GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+	[GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] =
+				&gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
 	[GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+	[GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] =
+				&gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
 	[GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
 	[GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
 	[GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
 	[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+	[GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] =
+				&gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
 					&gcc_ufs_card_unipro_core_clk_src.clkr,
 	[GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
 	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+	[GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
 	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
 	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+	[GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
 	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
 	[GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
 	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
 	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
 					&gcc_ufs_phy_unipro_core_clk_src.clkr,
 	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
@@ -3391,10 +3581,7 @@
 
 	/*
 	 * TODO:
-	 * 1. Support HW clock measurement
-	 * 2. Support UFS clock hw_ctrl
-	 * 3. Support mux clock interface for pcie pipe clocks
-	 * 4. QUPv3 support
+	 * 1. QUPv3 support
 	 */
 
 	dev_info(&pdev->dev, "Registered GCC clocks\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 9a71ea0..5604bf1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -259,15 +259,11 @@
 			       dsi_ctrl->cell_index, op_state);
 			rc = -EINVAL;
 		} else if (state->power_state == DSI_CTRL_POWER_VREG_ON) {
-			if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->controller_state == DSI_CTRL_ENGINE_ON)) {
-				pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+			if (state->vid_engine_state == DSI_CTRL_ENGINE_ON) {
+				pr_debug("[%d]State error: op=%d: %d\n",
 				       dsi_ctrl->cell_index,
 				       op_state,
-				       state->cmd_engine_state,
-				       state->vid_engine_state,
-				       state->controller_state);
+				       state->vid_engine_state);
 				rc = -EINVAL;
 			}
 		}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 86db16e..3402d48 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -3113,6 +3113,11 @@
 		pr_err("[%s] panel post-enable failed, rc=%d\n",
 		       display->name, rc);
 
+	/* remove the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
@@ -3128,6 +3133,11 @@
 
 	mutex_lock(&display->display_lock);
 
+	/* enable the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+
 	rc = dsi_panel_pre_disable(display->panel);
 	if (rc)
 		pr_err("[%s] panel pre-disable failed, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index ebfb40b8..a1a0e57 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -791,23 +791,12 @@
 		   bool skip_validation)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy || !config) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 
 	if (!skip_validation)
@@ -839,10 +828,6 @@
 error:
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
 	return rc;
 }
 
@@ -855,34 +840,17 @@
 int dsi_phy_disable(struct msm_dsi_phy *phy)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 	dsi_phy_disable_hw(phy);
 	phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
-
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 64e9544..322b7f2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -165,6 +165,7 @@
 	CONNECTOR_PROP_TOPOLOGY_NAME,
 	CONNECTOR_PROP_TOPOLOGY_CONTROL,
 	CONNECTOR_PROP_AUTOREFRESH,
+	CONNECTOR_PROP_LP,
 
 	/* total # of properties */
 	CONNECTOR_PROP_COUNT
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 9f8d7ee..58222f3 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt)	"sde-drm:[%s] " fmt, __func__
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include "msm_drv.h"
 
 #include "sde_kms.h"
@@ -44,6 +44,12 @@
 	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
 	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"}
 };
+static const struct drm_prop_enum_list e_power_mode[] = {
+	{SDE_MODE_DPMS_ON,	"ON"},
+	{SDE_MODE_DPMS_LP1,	"LP1"},
+	{SDE_MODE_DPMS_LP2,	"LP2"},
+	{SDE_MODE_DPMS_OFF,	"OFF"},
+};
 
 static int sde_backlight_device_update_status(struct backlight_device *bd)
 {
@@ -294,6 +300,7 @@
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
+	mutex_destroy(&c_conn->lock);
 	sde_fence_deinit(&c_conn->retire_fence);
 	drm_connector_cleanup(connector);
 	kfree(c_conn);
@@ -541,6 +548,56 @@
 	return 0;
 }
 
+static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
+{
+	struct drm_connector *connector;
+	void *display;
+	int (*set_power)(struct drm_connector *, int, void *);
+	int mode, rc = 0;
+
+	if (!c_conn)
+		return -EINVAL;
+	connector = &c_conn->base;
+
+	mode = c_conn->lp_mode;
+	if (c_conn->dpms_mode != DRM_MODE_DPMS_ON)
+		mode = SDE_MODE_DPMS_OFF;
+	switch (c_conn->dpms_mode) {
+	case DRM_MODE_DPMS_ON:
+		mode = c_conn->lp_mode;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		mode = SDE_MODE_DPMS_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		mode = SDE_MODE_DPMS_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		mode = SDE_MODE_DPMS_OFF;
+		break;
+	default:
+		mode = c_conn->lp_mode;
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id,
+			c_conn->dpms_mode, c_conn->lp_mode, mode);
+
+	if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) {
+		display = c_conn->display;
+		set_power = c_conn->ops.set_power;
+
+		mutex_unlock(&c_conn->lock);
+		rc = set_power(connector, mode, display);
+		mutex_lock(&c_conn->lock);
+	}
+	c_conn->last_panel_power_mode = mode;
+
+	return rc;
+}
+
 static int sde_connector_atomic_set_property(struct drm_connector *connector,
 		struct drm_connector_state *state,
 		struct drm_property *property,
@@ -567,8 +624,8 @@
 
 	/* connector-specific property handling */
 	idx = msm_property_index(&c_conn->property_info, property);
-
-	if (idx == CONNECTOR_PROP_OUT_FB) {
+	switch (idx) {
+	case CONNECTOR_PROP_OUT_FB:
 		/* clear old fb, if present */
 		if (c_state->out_fb)
 			_sde_connector_destroy_fb(c_conn, c_state);
@@ -598,12 +655,20 @@
 			if (rc)
 				SDE_ERROR("prep fb failed, %d\n", rc);
 		}
-	}
-
-	if (idx == CONNECTOR_PROP_TOPOLOGY_CONTROL) {
+		break;
+	case CONNECTOR_PROP_TOPOLOGY_CONTROL:
 		rc = sde_rm_check_property_topctl(val);
 		if (rc)
 			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
+		break;
+	case CONNECTOR_PROP_LP:
+		mutex_lock(&c_conn->lock);
+		c_conn->lp_mode = val;
+		_sde_connector_update_power_locked(c_conn);
+		mutex_unlock(&c_conn->lock);
+		break;
+	default:
+		break;
 	}
 
 	if (idx == CONNECTOR_PROP_ROI_V1) {
@@ -719,6 +784,59 @@
 	return status;
 }
 
+static int sde_connector_dpms(struct drm_connector *connector,
+				     int mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	/* validate incoming dpms request */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		SDE_DEBUG("conn %d dpms set to %d\n", connector->base.id, mode);
+		break;
+	default:
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	mutex_lock(&c_conn->lock);
+	c_conn->dpms_mode = mode;
+	_sde_connector_update_power_locked(c_conn);
+	mutex_unlock(&c_conn->lock);
+
+	/* use helper for boilerplate handling */
+	return drm_atomic_helper_connector_dpms(connector, mode);
+}
+
+int sde_connector_get_dpms(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int rc;
+
+	if (!connector) {
+		SDE_DEBUG("invalid connector\n");
+		return DRM_MODE_DPMS_OFF;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	mutex_lock(&c_conn->lock);
+	rc = c_conn->dpms_mode;
+	mutex_unlock(&c_conn->lock);
+
+	return rc;
+}
+
 #ifdef CONFIG_DEBUG_FS
 /**
  * sde_connector_init_debugfs - initialize connector debugfs
@@ -761,7 +879,7 @@
 }
 
 static const struct drm_connector_funcs sde_connector_ops = {
-	.dpms =                   drm_atomic_helper_connector_dpms,
+	.dpms =                   sde_connector_dpms,
 	.reset =                  sde_connector_atomic_reset,
 	.detect =                 sde_connector_detect,
 	.destroy =                sde_connector_destroy,
@@ -885,6 +1003,10 @@
 	c_conn->panel = panel;
 	c_conn->display = display;
 
+	c_conn->dpms_mode = DRM_MODE_DPMS_ON;
+	c_conn->lp_mode = 0;
+	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
+
 	/* cache mmu_id's for later */
 	sde_kms = to_sde_kms(priv->kms);
 	if (sde_kms->vbif[VBIF_NRT]) {
@@ -919,6 +1041,8 @@
 		goto error_cleanup_conn;
 	}
 
+	mutex_init(&c_conn->lock);
+
 	rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
 	if (rc) {
 		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
@@ -1006,6 +1130,10 @@
 			0, 1, e_topology_control,
 			ARRAY_SIZE(e_topology_control),
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
+	msm_property_install_enum(&c_conn->property_info, "LP",
+			0, 0, e_power_mode,
+			ARRAY_SIZE(e_power_mode),
+			CONNECTOR_PROP_LP);
 
 	rc = msm_property_install_get_status(&c_conn->property_info);
 	if (rc) {
@@ -1027,6 +1155,7 @@
 		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
+	mutex_destroy(&c_conn->lock);
 	sde_fence_deinit(&c_conn->retire_fence);
 error_cleanup_conn:
 	drm_connector_cleanup(&c_conn->base);
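
Reviewer note: the new "LP" connector property exposes panel low-power states (ON/LP1/LP2/OFF) to userspace without a full modeset, and sde_connector_dpms() folds legacy DPMS into the same consolidated panel power mode before delegating to the atomic helper. A hedged userspace sketch using libdrm to set the property, looking it up by name since property ids are driver-assigned:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Set a named property on a connector; returns 0 on success. */
    static int set_conn_prop(int fd, uint32_t conn_id,
                             const char *name, uint64_t value)
    {
        drmModeObjectProperties *props;
        uint32_t i;
        int ret = -1;

        props = drmModeObjectGetProperties(fd, conn_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
            return -1;

        for (i = 0; i < props->count_props; i++) {
            drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

            if (p && !strcmp(p->name, name))
                ret = drmModeObjectSetProperty(fd, conn_id,
                                               DRM_MODE_OBJECT_CONNECTOR,
                                               p->prop_id, value);
            drmModeFreeProperty(p);
        }
        drmModeFreeObjectProperties(props);
        return ret;
    }

    /* Usage: pick the enum value whose name is "LP1" from the property's
     * enums array, then: set_conn_prop(fd, conn_id, "LP", lp1_value);
     */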
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index c8c0eed..71e64e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -169,6 +169,20 @@
 	 * @enable: State of clks
 	 */
 	int (*clk_ctrl)(void *handle, u32 type, u32 state);
+
+	/**
+	 * set_power - update dpms setting
+	 * @connector: Pointer to drm connector structure
+	 * @power_mode: One of the following,
+	 *              SDE_MODE_DPMS_ON
+	 *              SDE_MODE_DPMS_LP1
+	 *              SDE_MODE_DPMS_LP2
+	 *              SDE_MODE_DPMS_OFF
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_power)(struct drm_connector *connector,
+			int power_mode, void *display);
 };
 
 /**
@@ -203,8 +217,12 @@
  * @mmu_secure: MMU id for secure buffers
  * @mmu_unsecure: MMU id for unsecure buffers
  * @name: ASCII name of connector
+ * @lock: Mutex lock object for this structure
  * @retire_fence: Retire fence context reference
  * @ops: Local callback function pointer table
+ * @dpms_mode: DPMS property setting from user space
+ * @lp_mode: LP property setting from user space
+ * @last_panel_power_mode: Last consolidated dpms/lp mode setting
  * @property_info: Private structure for generic property handling
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
@@ -226,8 +244,12 @@
 
 	char name[SDE_CONNECTOR_NAME_SIZE];
 
+	struct mutex lock;
 	struct sde_fence_context retire_fence;
 	struct sde_connector_ops ops;
+	int dpms_mode;
+	int lp_mode;
+	int last_panel_power_mode;
 
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
@@ -392,6 +414,13 @@
 void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
 
 /**
+ * sde_connector_get_dpms - query dpms setting
+ * @connector: Pointer to drm connector structure
+ * Returns: Current DPMS setting for connector
+ */
+int sde_connector_get_dpms(struct drm_connector *connector);
+
+/**
  * sde_connector_trigger_event - indicate that an event has occurred
  *	Any callbacks that have been registered against this event will
  *	be called from the same thread context.
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index f2d78cb..1bd7654 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -864,55 +864,82 @@
 	lm_bounds = &crtc_state->lm_bounds[lm_idx];
 	lm_roi = &crtc_state->lm_roi[lm_idx];
 
-	if (!sde_kms_rect_is_null(crtc_roi)) {
-		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
-		if (sde_kms_rect_is_null(lm_roi)) {
-			SDE_ERROR("unsupported R/L only partial update\n");
-			return -EINVAL;
-		}
-	} else {
+	if (sde_kms_rect_is_null(crtc_roi))
 		memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
-	}
+	else
+		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
 
 	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
 			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
 
+	/* if any dimension is zero, clear all dimensions for clarity */
+	if (sde_kms_rect_is_null(lm_roi))
+		memset(lm_roi, 0, sizeof(*lm_roi));
+
 	return 0;
 }
 
+static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *crtc_state;
+	u32 disp_bitmask = 0;
+	int i;
+
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+			disp_bitmask |= BIT(i);
+	}
+
+	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
+
+	return disp_bitmask;
+}
+
 static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *roi_prv, *roi_cur;
-	int lm_idx;
+	const struct sde_rect *roi[CRTC_DUAL_MIXERS];
 
 	if (!crtc || !state)
 		return -EINVAL;
 
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("%s: unsupported number of mixers: %d\n",
+				sde_crtc->name, sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+
 	/*
 	 * On certain HW, ROIs must be centered on the split between LMs,
 	 * and be of equal width.
 	 */
+	roi[0] = &crtc_state->lm_roi[0];
+	roi[1] = &crtc_state->lm_roi[1];
 
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
+	/* if one of the rois is null it's a left/right-only update */
+	if (sde_kms_rect_is_null(roi[0]) || sde_kms_rect_is_null(roi[1]))
+		return 0;
 
-	roi_prv = &crtc_state->lm_roi[0];
-	for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-		roi_cur = &crtc_state->lm_roi[lm_idx];
-
-		/* check lm rois are equal width & first roi ends at 2nd roi */
-		if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
-				(roi_prv->w != roi_cur->w)) {
-			SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
-					sde_crtc->name,
-					lm_idx-1, roi_prv->x, roi_prv->w,
-					lm_idx, roi_cur->x, roi_cur->w);
-			return -EINVAL;
-		}
-		roi_prv = roi_cur;
+	/* check lm rois are equal width & first roi ends at 2nd roi */
+	if (roi[0]->x + roi[0]->w != roi[1]->x || roi[0]->w != roi[1]->w) {
+		SDE_ERROR(
+			"%s: rois not centered and symmetric: roi0 x %d w %d roi1 x %d w %d\n",
+				sde_crtc->name, roi[0]->x, roi[0]->w,
+				roi[1]->x, roi[1]->w);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -1188,13 +1215,21 @@
  */
 static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_crtc_state;
+	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_mixer *lm;
 
 	int i;
 
+	if (!crtc)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_crtc_state = to_sde_crtc_state(crtc->state);
+	mixer = sde_crtc->mixers;
+
 	SDE_DEBUG("%s\n", sde_crtc->name);
 
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
@@ -1225,9 +1260,19 @@
 	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
+
 		ctl = mixer[i].hw_ctl;
 		lm = mixer[i].hw_lm;
 
+		if (sde_kms_rect_is_null(lm_roi)) {
+			SDE_DEBUG(
+				"%s: lm%d leave ctl%d mask 0 since null roi\n",
+					sde_crtc->name, lm->idx - LM_0,
+					ctl->idx - CTL_0);
+			continue;
+		}
+
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
 
 		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
@@ -1720,9 +1765,9 @@
 		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
 	struct drm_device *dev;
 	unsigned long flags;
-	u32 i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1753,12 +1798,12 @@
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
-	/* Reset flush mask from previous commit */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
 
-		if (ctl)
-			ctl->ops.clear_pending_flush(ctl);
+		/* encoder will trigger pending mask now */
+		sde_encoder_trigger_kickoff_pending(encoder);
 	}
 
 	/*
@@ -1901,6 +1946,14 @@
 	priv = sde_kms->dev->dev_private;
 	cstate = to_sde_crtc_state(crtc->state);
 
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to start a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct sde_encoder_kickoff_params params = { 0 };
 
@@ -1912,6 +1965,8 @@
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
 		params.inline_rotate_prefill = cstate->sbuf_prefill_line;
+		params.affected_displays = _sde_crtc_get_displays_affected(crtc,
+				crtc->state);
 		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
@@ -2143,21 +2198,62 @@
 	return 0;
 }
 
+static void sde_crtc_handle_power_event(u32 event_type, void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	SDE_EVT32(DRMID(crtc), event_type);
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
+		/* restore encoder; crtc will be programmed during commit */
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+
+			sde_encoder_virt_restore(encoder);
+		}
+
+	} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
+		struct drm_plane *plane;
+
+		/*
+		 * set revalidate flag in planes, so they will be re-programmed
+		 * in the next frame update
+		 */
+		drm_atomic_crtc_for_each_plane(plane, crtc)
+			sde_plane_set_revalidate(plane, true);
+	}
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc || !crtc->dev || !crtc->state) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
@@ -2197,6 +2293,10 @@
 		cstate->rsc_update = false;
 	}
 
+	if (sde_crtc->power_event)
+		sde_power_handle_unregister_event(&priv->phandle,
+				sde_crtc->power_event);
+
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 
@@ -2218,14 +2318,16 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32(DRMID(crtc));
@@ -2248,6 +2350,11 @@
 				sde_crtc->name, node->event);
 	}
 	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	sde_crtc->power_event = sde_power_handle_register_event(
+		&priv->phandle,
+		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+		sde_crtc_handle_power_event, crtc, sde_crtc->name);
 }
 
 struct plane_state {
@@ -2310,6 +2417,10 @@
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
+	/* force a full mode set if active state changed */
+	if (state->active_changed)
+		state->mode_changed = true;
+
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
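
A note on the ROI bookkeeping above: _sde_crtc_get_displays_affected() simply
sets one bit per layer mixer whose ROI survived the intersection with the CRTC
ROI. A minimal standalone sketch of that mapping (the rect type, helper, and
two-mixer layout are illustrative stand-ins, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rect { int x, y, w, h; };

static bool rect_is_null(const struct rect *r)
{
	return !r->w || !r->h;
}

int main(void)
{
	/* left-only partial update on a dual-mixer CRTC */
	struct rect lm_roi[2] = { { 0, 0, 540, 1920 }, { 0 } };
	uint32_t disp_bitmask = 0;
	int i;

	for (i = 0; i < 2; i++)
		if (!rect_is_null(&lm_roi[i]))
			disp_bitmask |= 1u << i;

	printf("affected displays 0x%x\n", disp_bitmask); /* prints 0x1 */
	return 0;
}
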
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 98ba711..ec5ec1d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -138,6 +138,7 @@
  * @event_free_list : List of available event structures
  * @event_lock    : Spinlock around event handling code
  * @misr_enable   : boolean entry indicates misr enable/disable status.
+ * @power_event   : registered power event handle
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -187,6 +188,8 @@
 	struct list_head event_free_list;
 	spinlock_t event_lock;
 	bool misr_enable;
+
+	struct sde_power_event *power_event;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -248,10 +251,10 @@
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
- * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
- *                  Origin top left of CRTC.
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
  * @lm_roi        : Current LM ROI, possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @user_roi_list : List of user's requested ROIs as from set property
@@ -274,8 +277,8 @@
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 
-	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect crtc_roi;
+	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
 	struct msm_roi_list user_roi_list;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 3357642..a136645 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1169,34 +1169,79 @@
 	}
 }
 
-static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
-	int i = 0;
 	int ret = 0;
 
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
 		return;
-	} else if (!drm_enc->dev) {
-		SDE_ERROR("invalid dev\n");
-		return;
-	} else if (!drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid dev_private\n");
+	}
+	priv = drm_enc->dev->dev_private;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc || !sde_enc->cur_master) {
+		SDE_ERROR("invalid sde encoder/master\n");
 		return;
 	}
 
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
-
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
 		return;
 	}
 
+	if (sde_enc->cur_master->hw_mdptop &&
+			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+				sde_enc->cur_master->hw_mdptop,
+				sde_kms->catalog);
+
+	if (_sde_is_dsc_enabled(sde_enc)) {
+		ret = _sde_encoder_dsc_setup(sde_enc);
+		if (ret)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
+	}
+}
+
+void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && (phys != sde_enc->cur_master) && phys->ops.restore)
+			phys->ops.restore(phys);
+	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->ops.restore)
+		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
+
+	_sde_encoder_virt_enable_helper(drm_enc);
+}
+
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
 
@@ -1230,21 +1275,10 @@
 			phys->ops.enable(phys);
 	}
 
-	if (sde_enc->cur_master && sde_enc->cur_master->ops.enable)
+	if (sde_enc->cur_master->ops.enable)
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
-	if (sde_enc->cur_master && sde_enc->cur_master->hw_mdptop &&
-			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
-		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
-				sde_enc->cur_master->hw_mdptop,
-				sde_kms->catalog);
-
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n",
-					ret);
-	}
+	_sde_encoder_virt_enable_helper(drm_enc);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1463,6 +1497,14 @@
 		return;
 	}
 
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip flush pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
+
 	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
@@ -1484,11 +1526,21 @@
  */
 static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
 {
+	struct sde_hw_ctl *ctl;
+
 	if (!phys) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
+	ctl = phys->hw_ctl;
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip start pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
 	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
 		phys->ops.trigger_start(phys);
 }
@@ -1620,9 +1672,13 @@
 			topology = sde_connector_get_topology_name(
 					phys->connector);
 
-		/* don't wait on ppsplit slaves, they dont register irqs */
+		/*
+		 * don't wait on ppsplit slaves or skipped encoders because
+		 * they don't receive irqs
+		 */
 		if (!(topology == SDE_RM_TOPOLOGY_PPSPLIT &&
-				phys->split_role == ENC_ROLE_SLAVE))
+				phys->split_role == ENC_ROLE_SLAVE) &&
+				phys->split_role != ENC_ROLE_SKIP)
 			set_bit(i, sde_enc->frame_busy_mask);
 
 		if (!phys->ops.needs_single_flush ||
@@ -1645,6 +1701,92 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	int i, num_active_phys;
+	bool master_assigned = false;
+
+	if (!drm_enc || !params)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (sde_enc->num_phys_encs <= 1)
+		return;
+
+	/* count bits set */
+	num_active_phys = hweight_long(params->affected_displays);
+
+	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
+			params->affected_displays, num_active_phys);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		enum sde_enc_split_role prv_role, new_role;
+		bool active;
+
+		phys = sde_enc->phys_encs[i];
+		if (!phys || !phys->ops.update_split_role)
+			continue;
+
+		active = test_bit(i, &params->affected_displays);
+		prv_role = phys->split_role;
+
+		if (active && num_active_phys == 1)
+			new_role = ENC_ROLE_SOLO;
+		else if (active && !master_assigned)
+			new_role = ENC_ROLE_MASTER;
+		else if (active)
+			new_role = ENC_ROLE_SLAVE;
+		else
+			new_role = ENC_ROLE_SKIP;
+
+		phys->ops.update_split_role(phys, new_role);
+		if (new_role == ENC_ROLE_SOLO || new_role == ENC_ROLE_MASTER) {
+			sde_enc->cur_master = phys;
+			master_assigned = true;
+		}
+
+		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active);
+	}
+}
+
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+	struct sde_hw_ctl *ctl;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->hw_ctl) {
+			ctl = phys->hw_ctl;
+			if (ctl->ops.clear_pending_flush)
+				ctl->ops.clear_pending_flush(ctl);
+
+			/* update only for command mode primary ctl */
+			if ((phys == sde_enc->cur_master) &&
+			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			    && ctl->ops.trigger_pending)
+				ctl->ops.trigger_pending(ctl);
+		}
+	}
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1654,8 +1796,8 @@
 	unsigned int i;
 	int rc;
 
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!drm_enc || !params) {
+		SDE_ERROR("invalid args\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
@@ -1678,6 +1820,7 @@
 
 	/* if any phys needs reset, reset all phys, in-order */
 	if (needs_hw_reset) {
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			phys = sde_enc->phys_encs[i];
 			if (phys && phys->ops.hw_reset)
@@ -1685,6 +1828,8 @@
 		}
 	}
 
+	_sde_encoder_update_master(drm_enc, params);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
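
The per-phys role selection in _sde_encoder_update_master() above reduces to a
four-way decision table. A standalone sketch with a worked check (the enum and
helper are hypothetical mirrors of sde_enc_split_role, not the driver's
header):

#include <assert.h>
#include <stdbool.h>

enum role { ROLE_SOLO, ROLE_MASTER, ROLE_SLAVE, ROLE_SKIP };

/* mirrors the per-phys decision in _sde_encoder_update_master() */
static enum role pick_role(bool active, int num_active, bool master_assigned)
{
	if (active && num_active == 1)
		return ROLE_SOLO;
	if (active && !master_assigned)
		return ROLE_MASTER;
	if (active)
		return ROLE_SLAVE;
	return ROLE_SKIP;
}

int main(void)
{
	/* right-only update on a dual-display encoder: bitmask 0b10 */
	assert(pick_role(false, 1, false) == ROLE_SKIP);  /* phys 0 */
	assert(pick_role(true,  1, false) == ROLE_SOLO);  /* phys 1 */

	/* full-frame update: bitmask 0b11 */
	assert(pick_role(true, 2, false) == ROLE_MASTER); /* phys 0 */
	assert(pick_role(true, 2, true)  == ROLE_SLAVE);  /* phys 1 */
	return 0;
}
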
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 5795e04..c5ddee6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -47,9 +47,12 @@
 /**
  * sde_encoder_kickoff_params - info encoder requires at kickoff
  * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
  */
 struct sde_encoder_kickoff_params {
 	u32 inline_rotate_prefill;
+	unsigned long affected_displays;
 };
 
 /**
@@ -101,6 +104,13 @@
 		struct sde_encoder_kickoff_params *params);
 
 /**
+ * sde_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare for command mode displays.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
@@ -124,6 +134,12 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
 * enum sde_encoder_property - property tags for sde encoder
  * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_REFILL: # of prefill line, 0 to disable
  */
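
Consumers of @affected_displays treat it as a plain bitmask: hweight_long()
counts the active displays and per-index bit tests drive the role assignment.
A portable sketch of those two operations (the popcount helper is a stand-in
for the kernel's hweight_long(), not the kernel API):

#include <assert.h>

/* portable stand-in for the kernel's hweight_long() */
static int popcount_long(unsigned long v)
{
	int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned long affected_displays = 0x2; /* ROI on display 1 only */

	assert(popcount_long(affected_displays) == 1);
	assert(!(affected_displays & (1ul << 0)));
	assert(affected_displays & (1ul << 1));
	return 0;
}
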
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6942292..a3b112d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -41,11 +41,13 @@
  * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
  * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
  * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ * @ENC_ROLE_SKIP:	This encoder is not participating in kickoffs
  */
 enum sde_enc_split_role {
 	ENC_ROLE_SOLO,
 	ENC_ROLE_MASTER,
-	ENC_ROLE_SLAVE
+	ENC_ROLE_SLAVE,
+	ENC_ROLE_SKIP
 };
 
 /**
@@ -118,6 +120,8 @@
  * @hw_reset:			Issue HW recovery such as CTL reset and clear
  *				SDE_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @update_split_role:		Update the split role of the phys enc
+ * @restore:			Restore all the encoder configs.
  */
 
 struct sde_encoder_phys_ops {
@@ -152,6 +156,9 @@
 	u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
 	void (*hw_reset)(struct sde_encoder_phys *phys_enc);
 	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
+	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
+			enum sde_enc_split_role role);
+	void (*restore)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -165,6 +172,7 @@
 	INTR_IDX_VSYNC,
 	INTR_IDX_PINGPONG,
 	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
 	INTR_IDX_RDPTR,
 	INTR_IDX_MAX,
 };
@@ -198,6 +206,8 @@
  *				vs. the number of done/vblank irqs. Should hover
  *				between 0-2 Incremented when a new kickoff is
  *				scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl starts
+ *                              still pending.
  * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
  */
 struct sde_encoder_phys {
@@ -221,12 +231,14 @@
 	atomic_t vblank_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	wait_queue_head_t pending_kickoff_wq;
 };
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 {
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
 	return atomic_inc_return(&phys->pending_kickoff_cnt);
 }
 
@@ -265,7 +277,6 @@
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
-	int intf_idx;
 	int stream_sel;
 	int irq_idx[INTR_IDX_MAX];
 	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
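
Both pending counters are incremented together in
sde_encoder_phys_inc_pending() and drained by separate interrupts (pp-done vs.
ctl-start). A minimal user-space sketch of that pairing using C11 atomics,
including the add_unless-style floor at zero (illustrative only, not the
kernel atomics API):

#include <assert.h>
#include <stdatomic.h>

static atomic_int pending_kickoff_cnt;
static atomic_int pending_ctlstart_cnt;

static void inc_pending(void)
{
	/* one kickoff arms both waits, as in sde_encoder_phys_inc_pending() */
	atomic_fetch_add(&pending_ctlstart_cnt, 1);
	atomic_fetch_add(&pending_kickoff_cnt, 1);
}

static void ctl_start_irq(void)
{
	int v = atomic_load(&pending_ctlstart_cnt);

	/* mimic atomic_add_unless(cnt, -1, 0): never drop below zero */
	while (v > 0 &&
	       !atomic_compare_exchange_weak(&pending_ctlstart_cnt, &v, v - 1))
		;
}

int main(void)
{
	inc_pending();
	ctl_start_irq();
	assert(atomic_load(&pending_ctlstart_cnt) == 0);
	assert(atomic_load(&pending_kickoff_cnt) == 1); /* pp-done still owed */
	return 0;
}
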
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index a4f40f2..572bd9e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -21,18 +21,21 @@
 #define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
 #define PP_TIMEOUT_MAX_TRIALS	10
 
+/* wait for 2 vsyncs only */
+#define CTL_START_TIMEOUT_MS	32
+
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels In the future, may want to allow panels to override
@@ -57,6 +60,46 @@
 	return true;
 }
 
+static void _sde_encoder_phys_cmd_update_flush_mask(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.get_bitmask_intf ||
+			!ctl->ops.update_pending_flush)
+		return;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
+}
+
+static void _sde_encoder_phys_cmd_update_intf_cfg(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -130,11 +173,35 @@
 	if (!cmd_enc)
 		return;
 
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 }
 
+static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *ctl;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (!phys_enc->hw_ctl)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
 static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
 {
 	enum sde_rm_topology_name topology;
@@ -240,7 +307,7 @@
 	if (ret <= 0) {
 		/* read and clear interrupt */
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				INTR_IDX_PINGPONG, true);
+				cmd_enc->irq_idx[INTR_IDX_PINGPONG], true);
 		if (irq_status) {
 			unsigned long flags;
 			SDE_EVT32(DRMID(phys_enc->parent),
@@ -295,8 +362,13 @@
 		return -EINVAL;
 	}
 
-	idx_lookup = (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN) ?
-			cmd_enc->intf_idx : phys_enc->hw_pp->idx;
+	if (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN)
+		idx_lookup = phys_enc->intf_idx;
+	else if (intr_type == SDE_IRQ_TYPE_CTL_START)
+		idx_lookup = phys_enc->hw_ctl ? phys_enc->hw_ctl->idx : -1;
+	else
+		idx_lookup = phys_enc->hw_pp->idx;
+
 	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
 			intr_type, idx_lookup);
 	if (cmd_enc->irq_idx[idx] < 0) {
@@ -409,9 +481,13 @@
 void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 		bool enable)
 {
+	struct sde_encoder_phys_cmd *cmd_enc;
+
 	if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return;
 
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
 	if (enable) {
 		sde_encoder_phys_cmd_register_irq(phys_enc,
 				SDE_IRQ_TYPE_PING_PONG_COMP,
@@ -426,7 +502,17 @@
 				INTR_IDX_UNDERRUN,
 				sde_encoder_phys_cmd_underrun_irq,
 				"underrun");
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_CTL_START,
+				INTR_IDX_CTL_START,
+				sde_encoder_phys_cmd_ctl_start_irq,
+				"ctl_start");
 	} else {
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_CTL_START);
 		sde_encoder_phys_cmd_unregister_irq(
 				phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
@@ -513,12 +599,11 @@
 	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
 }
 
-static void sde_encoder_phys_cmd_pingpong_config(
+static void _sde_encoder_phys_cmd_pingpong_config(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
 	if (!phys_enc || !phys_enc->hw_ctl ||
 			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
@@ -530,13 +615,7 @@
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	intf_cfg.intf = cmd_enc->intf_idx;
-	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
-	intf_cfg.stream_sel = cmd_enc->stream_sel;
-	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
-
-	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
-
+	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -549,10 +628,9 @@
 	return _sde_encoder_phys_is_ppsplit(phys_enc);
 }
 
-static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+static void sde_encoder_phys_cmd_enable_helper(
+		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_hw_ctl *ctl;
 	u32 flush_mask = 0;
 
@@ -560,6 +638,25 @@
 		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
 		return;
 	}
+
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+}
+
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid phys encoder\n");
+		return;
+	}
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
 	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
@@ -567,17 +664,8 @@
 		return;
 	}
 
-	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
-
-	sde_encoder_phys_cmd_pingpong_config(phys_enc);
-
-	ctl = phys_enc->hw_ctl;
-	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
-	ctl->ops.update_pending_flush(ctl, flush_mask);
+	sde_encoder_phys_cmd_enable_helper(phys_enc);
 	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
-			ctl->idx - CTL_0, flush_mask);
 }
 
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
@@ -639,7 +727,7 @@
 		return;
 	}
 	SDE_DEBUG_CMDENC(cmd_enc, "\n");
-	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
@@ -671,24 +759,93 @@
 	}
 }
 
+static int _sde_encoder_phys_cmd_wait_for_ctl_start(
+		struct sde_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct sde_hw_ctl *ctl;
+	u32 irq_status;
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc->hw_ctl) {
+		SDE_ERROR("invalid ctl\n");
+		return -EINVAL;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rc = sde_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
+			ctl->idx - CTL_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_ctlstart_cnt,
+			CTL_START_TIMEOUT_MS);
+	if (rc <= 0) {
+		/* read and clear interrupt */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				cmd_enc->irq_idx[INTR_IDX_CTL_START], true);
+		if (irq_status) {
+			unsigned long flags;
+
+			SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"ctl:%d start done but irq not triggered\n",
+					ctl->idx - CTL_0);
+			local_irq_save(flags);
+			sde_encoder_phys_cmd_ctl_start_irq(cmd_enc,
+					INTR_IDX_CTL_START);
+			local_irq_restore(flags);
+			rc = 0;
+		} else {
+			SDE_ERROR("ctl start interrupt wait failed\n");
+			rc = -EINVAL;
+		}
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
 static int sde_encoder_phys_cmd_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
+	int rc = 0;
+	struct sde_encoder_phys_cmd *cmd_enc;
 
-	if (cmd_enc->serialize_wait4pp)
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (sde_encoder_phys_cmd_is_master(phys_enc))
+		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
 		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
 
-	/*
-	 * following statement is true serialize_wait4pp is false.
-	 *
-	 * Since ctl_start "commits" the transaction to hardware, and the
-	 * tearcheck block takes it from there, there is no need to have a
-	 * separate wait for committed, a la wait-for-vsync in video mode
-	 */
+	return rc;
+}
 
-	return 0;
+static void sde_encoder_phys_cmd_update_split_role(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_enc_split_role role)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	enum sde_enc_split_role old_role = phys_enc->split_role;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
+			old_role, role);
+
+	phys_enc->split_role = role;
+	if (role == ENC_ROLE_SKIP || role == old_role)
+		return;
+
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
+	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
 }
 
 static void sde_encoder_phys_cmd_init_ops(
@@ -708,6 +865,8 @@
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
+	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
+	ops->restore = sde_encoder_phys_cmd_enable_helper;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -735,8 +894,6 @@
 		goto fail_mdp_init;
 	}
 	phys_enc->hw_mdptop = hw_mdp;
-
-	cmd_enc->intf_idx = p->intf_idx;
 	phys_enc->intf_idx = p->intf_idx;
 
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -753,6 +910,7 @@
 		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
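
_sde_encoder_phys_cmd_wait_for_ctl_start() above follows a common
timeout-vs-missed-irq pattern: on timeout, re-read the latched interrupt
status and, if it is set, run the handler by hand instead of failing. A
condensed runnable sketch of that structure (all helper names are
placeholders, not the SDE API):

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the SDE wait/irq helpers */
struct enc { bool irq_latched; };

static int wait_event_with_timeout(struct enc *e) { (void)e; return 0; }
static bool read_and_clear_irq_status(struct enc *e)
{
	bool v = e->irq_latched;

	e->irq_latched = false;
	return v;
}
static void handle_irq_manually(struct enc *e) { (void)e; puts("catch-up"); }

static int wait_for_hw_event(struct enc *e)
{
	int rc = wait_event_with_timeout(e);	/* ~2 vsyncs in the patch */

	if (rc <= 0) {
		/* timed out: the IRQ may have fired without being observed */
		if (read_and_clear_irq_status(e)) {
			handle_irq_manually(e);
			return 0;		/* event did happen */
		}
		return -1;			/* genuine timeout */
	}
	return 0;
}

int main(void)
{
	struct enc e = { .irq_latched = true };

	return wait_for_hw_event(&e);	/* returns 0 via the catch-up path */
}
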
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 82f1c09..a62aa6e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -26,6 +26,7 @@
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 #define   CTL_ROT_TOP                   0x0C0
@@ -78,6 +79,11 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
+static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
 static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
 {
 	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
@@ -537,6 +543,7 @@
 	ops->trigger_flush = sde_hw_ctl_trigger_flush;
 	ops->get_flush_register = sde_hw_ctl_get_flush_register;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->trigger_pending = sde_hw_ctl_trigger_pending;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
 	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 7ae43b7..ace05e8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -91,6 +91,14 @@
 	void (*trigger_start)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * indicate that the kickoff prepare hw operation is in progress
+	 * for SW controlled interfaces: DSI cmd mode and WB interfaces
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * kickoff rotator operation for Sw controlled interfaces
 	 * DSI cmd mode and WB interface are SW controlled
 	 * @ctx       : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 93268be..c408861 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -75,12 +75,6 @@
 
 #define TX_MODE_BUFFER_LINE_THRES 2
 
-/* dirty bits for update function */
-#define SDE_PLANE_DIRTY_RECTS	0x1
-#define SDE_PLANE_DIRTY_FORMAT	0x2
-#define SDE_PLANE_DIRTY_SHARPEN	0x4
-#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
-
 #define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
 #define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
 
@@ -107,7 +101,8 @@
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
  * @catalog: Points to sde catalog structure
  * @sbuf_mode: force stream buffer mode if set
- * @sbuf_writeback: fource stream buffer writeback if set
+ * @sbuf_writeback: force stream buffer writeback if set
+ * @revalidate: force revalidation of all the plane properties
  * @blob_rot_caps: Pointer to rotator capability blob
  */
 struct sde_plane {
@@ -134,6 +129,7 @@
 	struct sde_mdss_cfg *catalog;
 	u32 sbuf_mode;
 	u32 sbuf_writeback;
+	bool revalidate;
 
 	struct sde_hw_pixel_ext pixel_ext;
 	bool pixel_ext_usr;
@@ -499,6 +495,17 @@
 			&psde->pipe_qos_cfg);
 }
 
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable)
+{
+	struct sde_plane *psde;
+
+	if (!plane)
+		return;
+
+	psde = to_sde_plane(plane);
+	psde->revalidate = enable;
+}
+
 int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
 {
 	struct sde_plane *psde;
@@ -2250,6 +2257,14 @@
 			state->crtc_w, state->crtc_h,
 			state->crtc_x, state->crtc_y);
 
+	/* force reprogramming of all the parameters, if the flag is set */
+	if (psde->revalidate) {
+		SDE_DEBUG("plane:%d - reconfigure all the parameters\n",
+				plane->base.id);
+		pstate->dirty = SDE_PLANE_DIRTY_ALL;
+		psde->revalidate = false;
+	}
+
 	/* determine what needs to be refreshed */
 	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
 		switch (idx) {
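
The revalidate flag works entirely through the dirty bitmask: setting
SDE_PLANE_DIRTY_ALL once makes every dirty-bit test pass on the next update,
forcing a full reprogram. A small standalone sketch of that one-shot mechanism
(bit values copied from the defines moved into sde_plane.h below):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DIRTY_RECTS	0x1
#define DIRTY_FORMAT	0x2
#define DIRTY_SHARPEN	0x4
#define DIRTY_ALL	0xFFFFFFFF

int main(void)
{
	uint32_t dirty = 0;
	bool revalidate = true;	/* set from the POST_DISABLE power event */

	if (revalidate) {
		dirty = DIRTY_ALL;	/* force full reprogram next update */
		revalidate = false;	/* one-shot */
	}

	assert(dirty & DIRTY_RECTS);
	assert(dirty & DIRTY_FORMAT);
	assert(dirty & DIRTY_SHARPEN);
	return 0;
}
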
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e955f41..ac70542 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -91,6 +91,12 @@
 	int out_xpos;
 };
 
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS	0x1
+#define SDE_PLANE_DIRTY_FORMAT	0x2
+#define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
+
 /**
  * struct sde_plane_state: Define sde extension of drm plane state object
  * @base:	base drm plane state object
@@ -222,4 +228,12 @@
 int sde_plane_color_fill(struct drm_plane *plane,
 		uint32_t color, uint32_t alpha);
 
+/**
+ * sde_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
 #endif /* _SDE_PLANE_H_ */
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 4314616..b99c1df 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3115,7 +3115,7 @@
 {
 	int sioaddr[2] = { REG_2E, REG_4E };
 	struct it87_sio_data sio_data;
-	unsigned short isa_address;
+	unsigned short isa_address[2];
 	bool found = false;
 	int i, err;
 
@@ -3125,15 +3125,29 @@
 
 	for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
 		memset(&sio_data, 0, sizeof(struct it87_sio_data));
-		isa_address = 0;
-		err = it87_find(sioaddr[i], &isa_address, &sio_data);
-		if (err || isa_address == 0)
+		isa_address[i] = 0;
+		err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+		if (err || isa_address[i] == 0)
 			continue;
+		/*
+		 * Don't register second chip if its ISA address matches
+		 * the first chip's ISA address.
+		 */
+		if (i && isa_address[i] == isa_address[0])
+			break;
 
-		err = it87_device_add(i, isa_address, &sio_data);
+		err = it87_device_add(i, isa_address[i], &sio_data);
 		if (err)
 			goto exit_dev_unregister;
+
 		found = true;
+
+		/*
+		 * IT8705F may respond on both SIO addresses.
+		 * Stop probing after finding one.
+		 */
+		if (sio_data.type == it87)
+			break;
 	}
 
 	if (!found) {
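
The it87 change keeps both probed ISA addresses precisely so the second SIO
probe can be compared against the first. A compact sketch of that dedup guard
(the addresses and probe flow are illustrative, not from real hardware):

#include <stdio.h>

int main(void)
{
	/* both SIO ports decode to the same ISA address (e.g. a mirror) */
	unsigned short isa_address[2] = { 0x290, 0x290 };
	int i;

	for (i = 0; i < 2; i++) {
		if (i && isa_address[i] == isa_address[0]) {
			puts("duplicate ISA address: skip second chip");
			break;
		}
		printf("register chip %d at 0x%x\n", i, isa_address[i]);
	}
	return 0;
}
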
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index aded314..8ba6da4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -730,6 +730,7 @@
 {
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
+	struct iommu_group *group;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
@@ -746,7 +747,18 @@
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;
 
-	if (iommu_attach_device(domain, dev))
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return -ENODEV;
+	}
+
+	if (iommu_get_domain_for_dev(dev)) {
+		dev_err(dev, "Device already attached to other iommu_domain\n");
+		return -EINVAL;
+	}
+
+	if (iommu_attach_group(mapping->domain, group))
 		return -EINVAL;
 
 	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
@@ -781,7 +793,7 @@
 void fast_smmu_detach_device(struct device *dev,
 			     struct dma_iommu_mapping *mapping)
 {
-	iommu_detach_device(mapping->domain, dev);
+	iommu_detach_group(mapping->domain, dev->iommu_group);
 	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
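
The fast-SMMU attach path now validates the device's IOMMU group and rejects
devices that already own a domain before attaching at group granularity. A
condensed sketch of that precondition sequence against the upstream IOMMU API
(kernel fragment, not standalone; assumes <linux/iommu.h>, with error values
mirroring the patch):

/* sketch only: the checks added before attaching a fast-SMMU mapping */
static int attach_fast_domain(struct device *dev, struct iommu_domain *domain)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return -ENODEV;		/* no IOMMU behind this device */

	if (iommu_get_domain_for_dev(dev))
		return -EINVAL;		/* already attached elsewhere */

	if (iommu_attach_group(domain, group))
		return -EINVAL;

	return 0;
}
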
 
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 181e889..bea5f03 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -548,7 +548,7 @@
 		}
 	}
 
-	if (iommu_attach_device(domain, dev)) {
+	if (iommu_attach_group(domain, dev->iommu_group)) {
 		seq_puts(s,
 			 "Couldn't attach new domain to device. Is it already attached?\n");
 		goto out_domain_free;
@@ -669,7 +669,7 @@
 	}
 
 out_detach:
-	iommu_detach_device(domain, dev);
+	iommu_detach_group(domain, dev->iommu_group);
 out_domain_free:
 	iommu_domain_free(domain);
 }
@@ -1451,6 +1451,8 @@
 static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
 					int val, bool is_secure)
 {
+	struct iommu_group *group = ddev->dev->iommu_group;
+
 	ddev->domain = iommu_domain_alloc(&platform_bus_type);
 	if (!ddev->domain) {
 		pr_err("Couldn't allocate domain\n");
@@ -1464,8 +1466,8 @@
 		goto out_domain_free;
 	}
 
-	if (iommu_attach_device(ddev->domain, ddev->dev)) {
-		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
+	if (iommu_attach_group(ddev->domain, group)) {
+		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
 		goto out_domain_free;
 	}
 
@@ -1483,6 +1485,8 @@
 					  bool is_secure)
 {
 	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	struct iommu_domain *domain;
 	ssize_t retval;
 	int val;
 
@@ -1494,12 +1498,15 @@
 
 	if (val) {
 		if (ddev->domain) {
-			pr_err("Already attached.\n");
+			pr_err("Iommu-Debug is already attached?\n");
 			retval = -EINVAL;
 			goto out;
 		}
-		if (WARN(ddev->dev->archdata.iommu,
-			 "Attachment tracking out of sync with device\n")) {
+
+		domain = iommu_get_domain_for_dev(dev);
+		if (domain) {
+			pr_err("Another driver is using this device's iommu\n"
+				"Iommu-Debug cannot be used concurrently\n");
 			retval = -EINVAL;
 			goto out;
 		}
@@ -1510,11 +1517,11 @@
 		pr_err("Attached\n");
 	} else {
 		if (!ddev->domain) {
-			pr_err("No domain. Did you already attach?\n");
+			pr_err("Iommu-Debug is not attached?\n");
 			retval = -EINVAL;
 			goto out;
 		}
-		iommu_detach_device(ddev->domain, ddev->dev);
+		iommu_detach_group(ddev->domain, dev->iommu_group);
 		iommu_domain_free(ddev->domain);
 		ddev->domain = NULL;
 		pr_err("Detached\n");
@@ -1566,7 +1573,6 @@
 {
 	return __iommu_debug_attach_write(file, ubuf, count, offset,
 					  true);
-
 }
 
 static const struct file_operations iommu_debug_secure_attach_fops = {
@@ -1868,6 +1874,10 @@
 	if (!of_find_property(dev->of_node, "iommus", NULL))
 		return 0;
 
+	/* Hold a reference count */
+	if (!iommu_group_get(dev))
+		return 0;
+
 	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
 	if (!ddev)
 		return -ENODEV;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c9281fb..daccf64 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -458,9 +458,6 @@
 				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
 				gic_data_rdist_rd_base() = ptr;
 				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
-				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
-					smp_processor_id(), mpidr, i,
-					&gic_data_rdist()->phys_base);
 				return 0;
 			}
 
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 89ec6d2..be13ebf 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1847,7 +1847,7 @@
 	if (r)
 		goto out;
 
-	param->data_size = sizeof(*param);
+	param->data_size = offsetof(struct dm_ioctl, data);
 	r = fn(param, input_param_size);
 
 	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
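
The dm-ioctl fix matters because struct dm_ioctl ends with a trailing data[]
payload area: sizeof() includes those payload bytes while offsetof() stops at
the header, so the old value over-reported data_size. A tiny standalone
illustration (a generic struct, not the real dm_ioctl layout):

#include <stddef.h>
#include <stdio.h>

struct hdr {
	unsigned int version;
	unsigned int data_size;	/* total bytes: header + payload */
	char data[7];		/* payload starts here */
};

int main(void)
{
	/* header-only size excludes the trailing payload bytes */
	printf("sizeof   = %zu\n", sizeof(struct hdr));
	printf("offsetof = %zu\n", offsetof(struct hdr, data));
	return 0;
}
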
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index 87707b1..e6da6ca 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1,3 +1,9 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o cam_req_mgr_workq.o cam_mem_mgr.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o \
+				cam_req_mgr_util.o \
+				cam_req_mgr_core.o \
+				cam_req_mgr_workq.o \
+				cam_mem_mgr.o \
+				cam_req_mgr_timer.o \
+				cam_req_mgr_debug.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index a34703c..e62c101 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -18,588 +18,1960 @@
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
 #include "cam_req_mgr_workq.h"
-
-/* Forward declarations */
-static int cam_req_mgr_cb_notify_sof(
-	struct cam_req_mgr_sof_notify *sof_data);
-
+#include "cam_req_mgr_debug.h"
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 
-static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
-	.notify_sof = cam_req_mgr_cb_notify_sof,
-	.notify_err = NULL,
-	.add_req = NULL,
-};
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                  i = 0;
+	int                      rc = 0;
+	struct crm_task_payload *task_data = NULL;
+
+	task_data = kcalloc(
+		workq->task.num_task, sizeof(*task_data),
+		GFP_KERNEL);
+	if (!task_data) {
+		rc = -ENOMEM;
+	} else {
+		for (i = 0; i < workq->task.num_task; i++)
+			workq->task.pool[i].payload = &task_data[i];
+	}
+
+	return rc;
+}
 
 /**
- * cam_req_mgr_pvt_find_link()
+ * __cam_req_mgr_print_req_tbl()
  *
- * @brief: Finds link matching with handle within session
- * @session: session indetifier
- * @link_hdl: link handle
+ * @brief : Print req table data for debugging
+ * @req   : pointer to request data holding the input queue and req tables
  *
- * Returns pointer to link matching handle
+ * @return: 0 for success, negative for failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_find_link(
-	struct cam_req_mgr_core_session *session, int32_t link_hdl)
+static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req)
 {
-	int32_t i;
+	int                           rc = 0;
+	int32_t                       i = 0;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+	struct cam_req_mgr_req_tbl   *req_tbl = req->l_tbl;
+
+	if (!in_q || !req_tbl) {
+		CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+		return -EINVAL;
+	}
+	CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+	mutex_lock(&req->lock);
+	for (i = 0; i < in_q->num_slots; i++) {
+		CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+			in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
+	}
+
+	while (req_tbl != NULL) {
+		for (i = 0; i < req_tbl->num_slots; i++) {
+			CRM_DBG("idx= %d, map= %x, state= %d",
+				req_tbl->slot[i].idx,
+				req_tbl->slot[i].req_ready_map,
+				req_tbl->slot[i].state);
+		}
+		CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+			req_tbl->id, req_tbl->pd, req_tbl->dev_count,
+			req_tbl->dev_mask, req_tbl->skip_traverse,
+			req_tbl->num_slots);
+		req_tbl = req_tbl->next;
+	}
+	mutex_unlock(&req->lock);
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief    : Find pipeline delay based table pointer which matches delay
+ * @tbl      : Pointer to list of request table
+ * @delay    : Pipeline delay value to be searched for comparison
+ *
+ * @return   : pointer to request table for matching pipeline delay table.
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+	struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+	if (!tbl)
+		return NULL;
+
+	do {
+		if (delay != tbl->pd)
+			tbl = tbl->next;
+		else
+			return tbl;
+	} while (tbl != NULL);
+
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief    : Increment val passed by step size and rollover after max_val
+ * @val      : value to be incremented
+ * @step     : amount/step by which val is incremented
+ * @max_val  : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief    : Decrement val passed by step size and rollover after max_val
+ * @val      : value to be decremented
+ * @step     : amount/step by which val is decremented
+ * @max_val  : after zero, the value will roll over to max val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = *val - step;
+	if (*val < 0)
+		*val = max_val + (*val);
+}
+
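
The two helpers above implement ring-buffer index arithmetic over num_slots
entries. A quick standalone check of the wrap-around in both directions (not
part of the patch; the slot count is chosen arbitrarily):

#include <assert.h>

static void inc_idx(int *val, int step, int max_val)
{
	*val = (*val + step) % max_val;
}

static void dec_idx(int *val, int step, int max_val)
{
	*val -= step;
	if (*val < 0)
		*val += max_val;
}

int main(void)
{
	int idx = 6;

	inc_idx(&idx, 3, 8);	/* 6 + 3 wraps to 1 */
	assert(idx == 1);
	dec_idx(&idx, 3, 8);	/* 1 - 3 wraps back to 6 */
	assert(idx == 6);
	return 0;
}
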
+/**
+ * __cam_req_mgr_traverse()
+ *
+ * @brief    : Traverse through pd tables; it will internally cover all linked
+ *             pd tables. Each pd table visited checks whether the idx passed
+ *             to it is in ready state, meaning all devices linked to that pd
+ *             table have this request id's packet ready. It then calls the
+ *             subsequent pd tbl with a new idx, which takes into account the
+ *             delta between the current pd table and the next one.
+ * @traverse_data: contains all the info to traverse through pd tables
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
+{
+	int                          rc = 0;
+	int32_t                      next_idx = traverse_data->idx;
+	int32_t                      curr_idx = traverse_data->idx;
+	struct cam_req_mgr_req_tbl  *tbl;
+	struct cam_req_mgr_apply    *apply_data;
+
+	if (!traverse_data->tbl || !traverse_data->apply_data) {
+		CRM_ERR("NULL pointer %pK %pK",
+			traverse_data->tbl, traverse_data->apply_data);
+		traverse_data->result = 0;
+		return -EINVAL;
+	}
+
+	tbl = traverse_data->tbl;
+	apply_data = traverse_data->apply_data;
+	CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
+		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
+
+	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
+	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
+		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
+		tbl->skip_traverse > 0) {
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		if (rc >= 0) {
+			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+			apply_data[tbl->pd].pd = tbl->pd;
+			apply_data[tbl->pd].req_id =
+				CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
+			apply_data[tbl->pd].idx = curr_idx;
+
+			/* If traverse is successful, decrement traverse skip */
+			if (tbl->skip_traverse > 0) {
+				apply_data[tbl->pd].req_id = -1;
+				tbl->skip_traverse--;
+			}
+		} else {
+			/* linked pd table is not ready for this traverse yet */
+			return rc;
+		}
+	} else {
+		/* This pd table is not ready to proceed with asked idx */
+		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_in_q_skip_idx()
+ *
+ * @brief    : Mark a slot in the input queue to be skipped during traverse
+ * @in_q     : input queue pointer
+ * @idx      : slot index; sets the skip_idx bit of this slot to true so that
+ *             when traverse happens for this idx, no req will be submitted
+ *             for devices handling this idx.
+ *
+ */
+static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
+	int32_t idx)
+{
+	in_q->slot[idx].req_id = -1;
+	in_q->slot[idx].skip_idx = 1;
+	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+	CRM_DBG("SET IDX SKIP on slot= %d", idx);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_id()
+ *
+ * @brief    : Set unique id to table
+ * @tbl      : pipeline based table which requires new id
+ * @req      : pointer to request data which contains the num_tbl counter
+ *
+ */
+static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
+	struct cam_req_mgr_req_data *req)
+{
+	if (!tbl)
+		return;
+	do {
+		tbl->id = req->num_tbl++;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_all_skip_cnt()
+ *
+ * @brief    : Each pd table sets skip value based on delta between itself and
+ *             max pd value. During initial streamon or bubble case this is
+ *             used. That way each pd table skips the required number of
+ *             traverses and aligns itself with the devices connected to req mgr.
+ * @l_tbl    : iterates through list of pd tables and sets skip traverse
+ *
+ */
+static void __cam_req_mgr_tbl_set_all_skip_cnt(
+	struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl = *l_tbl;
+	int32_t                     max_pd;
+
+	if (!tbl)
+		return;
+
+	max_pd = tbl->pd;
+	do {
+		tbl->skip_traverse = max_pd - tbl->pd;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
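
With tables sorted by decreasing pd, each skip count is max_pd minus the
table's own pd, so lower-delay tables idle until the pipelines line up. A
worked check (not part of the patch; the pd values are an assumed example):

#include <assert.h>

int main(void)
{
	int pds[] = { 2, 1, 0 };	/* tables sorted by decreasing pd */
	int max_pd = pds[0];
	int skip[3];
	int i;

	for (i = 0; i < 3; i++)
		skip[i] = max_pd - pds[i];

	assert(skip[0] == 0);	/* highest-pd table never skips */
	assert(skip[2] == 2);	/* pd-0 table skips the first two traverses */
	return 0;
}
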
+/**
+ * __cam_req_mgr_reset_req_slot()
+ *
+ * @brief    : reset specified idx/slot in input queue as well as all pd tables
+ * @link     : link pointer
+ * @idx      : slot index which will be reset
+ *
+ */
+static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	struct cam_req_mgr_slot      *slot;
+	struct cam_req_mgr_req_tbl   *tbl = link->req.l_tbl;
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	slot = &in_q->slot[idx];
+	CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if CSL has already pushed a new request */
+	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
+		return;
+
+	/* Reset input queue slot */
+	slot->req_id = -1;
+	slot->skip_idx = 0;
+	slot->recover = 0;
+	slot->status = CRM_SLOT_STATUS_NO_REQ;
+
+	/* Reset all pd table slot */
+	while (tbl != NULL) {
+		CRM_DBG("pd: %d: idx %d state %d",
+			tbl->pd, idx, tbl->slot[idx].state);
+		tbl->slot[idx].req_ready_map = 0;
+		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
+		tbl = tbl->next;
+	}
+}
+
+/**
+ * __cam_req_mgr_check_next_req_slot()
+ *
+ * @brief    : While streaming if input queue does not contain any pending
+ *             request, req mgr still needs to submit pending request ids to
+ *             devices with lower pipeline delay value.
+ * @in_q     : Pointer to input queue that req mgr will peek into
+ *
+ */
+static void __cam_req_mgr_check_next_req_slot(
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int32_t                  idx = in_q->rd_idx;
+	struct cam_req_mgr_slot *slot;
+
+	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
+	slot = &in_q->slot[idx];
+
+	CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if there is a new req from CSL; if not, mark the slot skipped */
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		if (in_q->wr_idx != idx)
+			CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+		__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	}
+}
+
+/**
+ * __cam_req_mgr_send_req()
+ *
+ * @brief    : send request id to be applied to each device connected on link
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @in_q     : pointer to input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int                                  rc = 0, pd, i, idx;
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_apply_request     apply_req;
+	struct cam_req_mgr_link_evt_data     evt_data;
+
+	apply_req.link_hdl = link->link_hdl;
+	apply_req.report_if_bubble = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev) {
+			pd = dev->dev_info.p_delay;
+			if (pd >= CAM_PIPELINE_DELAY_MAX) {
+				CRM_WARN("pd %d greater than max",
+					pd);
+				continue;
+			}
+			if (link->req.apply_data[pd].skip_idx ||
+				link->req.apply_data[pd].req_id < 0) {
+				CRM_DBG("skip %d req_id %lld",
+					link->req.apply_data[pd].skip_idx,
+					link->req.apply_data[pd].req_id);
+				continue;
+			}
+			apply_req.dev_hdl = dev->dev_hdl;
+			apply_req.request_id =
+				link->req.apply_data[pd].req_id;
+			idx = link->req.apply_data[pd].idx;
+			apply_req.report_if_bubble =
+				in_q->slot[idx].recover;
+			CRM_DBG("SEND: pd %d req_id %lld",
+				pd, apply_req.request_id);
+			if (dev->ops && dev->ops->apply_req) {
+				rc = dev->ops->apply_req(&apply_req);
+				if (rc < 0)
+					break;
+			}
+		}
+	}
+	if (rc < 0) {
+		CRM_ERR("APPLY FAILED pd %d req_id %lld",
+			dev->dev_info.p_delay, apply_req.request_id);
+		/* Apply req failed notify already applied devs */
+		for (; i >= 0; i--) {
+			dev = &link->l_dev[i];
+			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
+			evt_data.link_hdl =  link->link_hdl;
+			evt_data.req_id = apply_req.request_id;
+			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
+			if (dev->ops && dev->ops->process_evt)
+				dev->ops->process_evt(&evt_data);
+		}
+	}
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_check_link_is_ready()
+ *
+ * @brief    : traverse through all request tables and see if all devices are
+ *             ready to apply request settings.
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @idx      : index within input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	int                            rc;
+	struct cam_req_mgr_traverse    traverse_data;
+	struct cam_req_mgr_req_queue  *in_q;
+	struct cam_req_mgr_apply      *apply_data;
+
+	in_q = link->req.in_q;
+
+	apply_data = link->req.apply_data;
+	memset(apply_data, 0,
+		sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+
+	traverse_data.apply_data = apply_data;
+	traverse_data.idx = idx;
+	traverse_data.tbl = link->req.l_tbl;
+	traverse_data.in_q = in_q;
+	traverse_data.result = 0;
+	/*
+	 *  Traverse through all pd tables, if result is success,
+	 *  apply the settings
+	 */
+
+	rc = __cam_req_mgr_traverse(&traverse_data);
+	CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+		idx, traverse_data.result, link->pd_mask, rc);
+
+	if (!rc && traverse_data.result == link->pd_mask) {
+		CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+			link->link_hdl, idx,
+			apply_data[2].req_id, apply_data[1].req_id,
+			apply_data[0].req_id);
+	} else
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_process_req()
+ *
+ * @brief    : processes read index in request queue and traverse through table
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link)
+{
+	int                                  rc = 0, idx;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_req_queue        *in_q;
+	struct cam_req_mgr_core_session     *session;
+
+	in_q = link->req.in_q;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	/*
+	 * 1. Check the slot at the new read index:
+	 * - if in pending state, traverse again to complete
+	 *   the transaction of this read index.
+	 * - if in applied state, something is wrong.
+	 * - if in no_req state, there is no new request.
+	 */
+	CRM_DBG("idx %d req_status %d",
+		in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+
+	slot = &in_q->slot[in_q->rd_idx];
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		CRM_DBG("No Pending req");
+		return 0;
+	}
+
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
+	if (rc >= 0) {
+		rc = __cam_req_mgr_send_req(link, link->req.in_q);
+		if (rc < 0) {
+			/* Apply req failed retry at next sof */
+			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		} else {
+			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+
+			if (link->state == CAM_CRM_LINK_STATE_ERR) {
+				CRM_WARN("Err recovery done idx %d status %d",
+					in_q->rd_idx,
+					in_q->slot[in_q->rd_idx].status);
+				mutex_lock(&link->lock);
+				link->state = CAM_CRM_LINK_STATE_READY;
+				mutex_unlock(&link->lock);
+			}
+
+			/*
+			 * 2. Check if any new req is pending in input queue,
+			 *    if not finish the lower pipeline delay device with
+			 *    available req ids.
+			 */
+			__cam_req_mgr_check_next_req_slot(in_q);
+
+			/*
+			 * 3. Older req slots can be safely reset now, as no
+			 *    error ack is pending on them.
+			 */
+			idx = in_q->rd_idx;
+			__cam_req_mgr_dec_idx(&idx, link->max_delay + 1,
+				in_q->num_slots);
+			__cam_req_mgr_reset_req_slot(link, idx);
+		}
+	} else {
+		/*
+		 * 4. If the traverse result is not success, some devices are
+		 *    not yet ready with a packet for the requested id,
+		 *    hence try again on the next SOF.
+		 */
+		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		if (link->state == CAM_CRM_LINK_STATE_ERR) {
+			/*
+			 * During error recovery all tables should be ready,
+			 * so we don't expect to enter here.
+			 * @TODO: gracefully handle if recovery fails.
+			 */
+			CRM_ERR("FATAL recovery can't finish idx %d status %d",
+				in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+			rc = -EPERM;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_add_tbl_to_link()
+ *
+ * @brief    : Add table to the list under link, sorted in decreasing pd order
+ * @l_tbl    : list of pipeline delay tables.
+ * @new_tbl : new tbl which will be appended to above list as per its pd value
+ *
+ */
+static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
+	struct cam_req_mgr_req_tbl *new_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl;
+
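+	/*
+	 * The list stays sorted from highest pd to lowest. For example,
+	 * inserting a pd 1 table into [pd 2] yields [pd 2 -> pd 1], and
+	 * the pd 2 table records pd_delta = 1 to its lower neighbour.
+	 */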
+	if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
+		new_tbl->next = *l_tbl;
+		if (*l_tbl) {
+			new_tbl->pd_delta =
+				new_tbl->pd - (*l_tbl)->pd;
+		}
+		*l_tbl = new_tbl;
+	} else {
+		tbl = *l_tbl;
+
+		/* Walk to the existing tbl which has a smaller pd value */
+		while (tbl->next != NULL &&
+			new_tbl->pd < tbl->next->pd) {
+			tbl = tbl->next;
+		}
+		if (tbl->next != NULL) {
+			new_tbl->pd_delta =
+				new_tbl->pd - tbl->next->pd;
+		} else {
+			/* This is the last table in the linked list */
+			new_tbl->pd_delta = 0;
+		}
+		new_tbl->next = tbl->next;
+		tbl->next = new_tbl;
+		tbl->pd_delta = tbl->pd - new_tbl->pd;
+	}
+	CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+		new_tbl->pd_delta);
+}
+
+/**
+ * __cam_req_mgr_create_pd_tbl()
+ *
+ * @brief    : Creates new request table for new delay value
+ * @delay    : New pd table allocated will have this delay value
+ *
+ * @return   : pointer to newly allocated table, NULL for failure
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
+{
+	struct cam_req_mgr_req_tbl *tbl =
+		kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
+	if (tbl != NULL) {
+		tbl->num_slots = MAX_REQ_SLOTS;
+		CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+	}
+
+	return tbl;
+}
+
+/**
+ * __cam_req_mgr_destroy_all_tbl()
+ *
+ * @brief   : Destroys all pipeline delay based req table structs
+ * @l_tbl   : pointer to first table in the list; it holds the max pd
+ *
+ */
+static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl  *tbl = *l_tbl, *temp;
+
+	CRM_DBG("*l_tbl %pK", tbl);
+	while (tbl != NULL) {
+		temp = tbl->next;
+		kfree(tbl);
+		tbl = temp;
+	}
+	*l_tbl = NULL;
+}
+
+/**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief    : Find idx from input queue at which req id is enqueued
+ * @in_q     : input request queue pointer
+ * @req_id   : request id which needs to be searched in input queue
+ *
+ * @return   : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+	int32_t                   idx, i;
+	struct cam_req_mgr_slot  *slot;
+
+	idx = in_q->wr_idx;
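+	/* Scan backwards from the most recently written slot */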
+	for (i = 0; i < in_q->num_slots; i++) {
+		slot = &in_q->slot[idx];
+		if (slot->req_id == req_id) {
+			CRM_DBG("req %lld found at %d %d status %d",
+				req_id, idx, slot->idx,
+				slot->status);
+			break;
+		}
+		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+	}
+	if (i >= in_q->num_slots)
+		idx = -1;
+
+	return idx;
+}
+
+/**
+ * __cam_req_mgr_setup_in_q()
+ *
+ * @brief : Initialize input queue data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int  __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
+{
+	int                           i;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	in_q->num_slots = MAX_REQ_SLOTS;
+
+	for (i = 0; i < in_q->num_slots; i++) {
+		in_q->slot[i].idx = i;
+		in_q->slot[i].req_id = -1;
+		in_q->slot[i].skip_idx = 0;
+		in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
+	}
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_reset_in_q()
+ *
+ * @brief : Reset input queue data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
+{
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
+	in_q->num_slots = 0;
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_sof_freeze()
+ *
+ * @brief : Apoptosis - Handles case when connected devices are not responding
+ * @data  : timer pointer
+ *
+ */
+static void __cam_req_mgr_sof_freeze(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
 	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
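+	/* crm_timer_init() stored the owning link in timer->parent */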
+	link = (struct cam_req_mgr_core_link *)timer->parent;
+	CRM_ERR("SOF freeze for link %x", link->link_hdl);
+}
+
+/**
+ * __cam_req_mgr_create_subdevs()
+ *
+ * @brief   : Create new crm subdevs to link with realtime devices
+ * @l_dev   : list of subdevs internal to crm
+ * @num_dev : num of subdevs to be created for link
+ *
+ * @return  : 0 for success, -ENOMEM when allocation fails
+ */
+static int __cam_req_mgr_create_subdevs(
+	struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
+{
+	int rc = 0;
+	*l_dev = (struct cam_req_mgr_connected_device *)
+		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
+		GFP_KERNEL);
+	if (!*l_dev)
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_subdev()
+ *
+ * @brief    : Cleans up the subdevs allocated by crm for link
+ * @l_device : pointer to list of subdevs crm created
+ *
+ */
+static void __cam_req_mgr_destroy_subdev(
+	struct cam_req_mgr_connected_device *l_device)
+{
+	kfree(l_device);
+	l_device = NULL;
+}
+
+/**
+ * __cam_req_mgr_destroy_link_info()
+ *
+ * @brief    : Cleans up the mem allocated while linking
+ * @link     : pointer to link, mem associated with this link is freed
+ *
+ */
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+{
+	int32_t                                 i = 0;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+
+	mutex_lock(&link->lock);
+
+	link_data.link_enable = 0;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = NULL;
+
+	/* Using device ops unlink devices */
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev != NULL) {
+			if (dev->ops && dev->ops->link_setup)
+				dev->ops->link_setup(&link_data);
+			dev->dev_hdl = 0;
+			dev->parent = NULL;
+			dev->ops = NULL;
+		}
+	}
+	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
+	__cam_req_mgr_reset_in_q(&link->req);
+	link->req.num_tbl = 0;
+	mutex_destroy(&link->req.lock);
+
+	link->pd_mask = 0;
+	link->num_devs = 0;
+	link->max_delay = 0;
+
+	mutex_unlock(&link->lock);
+}
+
+/**
+ * __cam_req_mgr_reserve_link()
+ *
+ * @brief: Reserves one link data struct within session
+ * @session: session identifier
+ *
+ * @return: pointer to link reserved
+ *
+ */
+static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
+	struct cam_req_mgr_core_session *session)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_req_queue *in_q;
+
+	if (!session || !g_crm_core_dev) {
+		CRM_ERR("NULL session/core_dev ptr");
 		return NULL;
 	}
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_hdl == link_hdl) {
-			CRM_DBG("Link found p_delay %d",
-				 link->max_pipeline_delay);
-			spin_unlock(&link->lock);
-			break;
-		}
-		spin_unlock(&link->lock);
+	if (session->num_links >= MAX_LINKS_PER_SESSION) {
+		CRM_ERR("Reached max links %d per session limit %d",
+			session->num_links, MAX_LINKS_PER_SESSION);
+		return NULL;
 	}
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-	spin_unlock(&session->lock);
+
+	link = (struct cam_req_mgr_core_link *)
+		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
+	if (!link) {
+		CRM_ERR("failed to create link, no mem");
+		return NULL;
+	}
+	in_q = &session->in_q;
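+	/*
+	 * The input request queue is owned by the session and shared with
+	 * the link via link->req.in_q below.
+	 */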
+	mutex_init(&link->lock);
+
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	link->num_devs = 0;
+	link->max_delay = 0;
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
+	link->req.in_q = in_q;
+	in_q->num_slots = 0;
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	link->parent = (void *)session;
+	mutex_unlock(&link->lock);
+
+	mutex_lock(&session->lock);
+	session->links[session->num_links] = link;
+	session->num_links++;
+	CRM_DBG("Active session links (%d)",
+		session->num_links);
+	mutex_unlock(&session->lock);
 
 	return link;
 }
 
 /**
+ * __cam_req_mgr_unreserve_link()
+ *
+ * @brief  : Frees the link data struct and removes it from its session
+ * @session: session identifier
+ * @link   : pointer to link to be unreserved and freed
+ *
+ */
+static void __cam_req_mgr_unreserve_link(
+	struct cam_req_mgr_core_session *session,
+	struct cam_req_mgr_core_link **link)
+{
+	int32_t   i = 0;
+
+	if (!session || !*link) {
+		CRM_ERR("NULL session/link ptr %pK %pK",
+			session, *link);
+		return;
+	}
+
+	mutex_lock(&session->lock);
+	if (!session->num_links)
+		CRM_WARN("No active link or invalid state %d",
+			session->num_links);
+	else {
+		for (i = 0; i < session->num_links; i++) {
+			if (session->links[i] == *link)
+				session->links[i] = NULL;
+		}
+		session->num_links--;
+		CRM_DBG("Active session links (%d)",
+			session->num_links);
+	}
+	kfree(*link);
+	*link = NULL;
+	mutex_unlock(&session->lock);
+}
+
+/* Workqueue context processing section */
+
+/**
+ * cam_req_mgr_process_send_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to send
+ *         the request id to be applied to the drivers.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_send_req(void *priv, void *data)
+{
+	int                                 rc = 0;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_send_request     *send_req = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	send_req = (struct cam_req_mgr_send_request *)data;
+	in_q = send_req->in_q;
+
+	rc = __cam_req_mgr_send_req(link, in_q);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_flush_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which requests need to be removed/cancelled.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_flush_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx = -1;
+	struct cam_req_mgr_flush_info       *flush_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_flush_request     flush_req;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld type %d",
+		flush_info->link_hdl,
+		flush_info->req_id,
+		flush_info->flush_type);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		for (i = 0; i < in_q->num_slots; i++) {
+			slot = &in_q->slot[i];
+			slot->req_id = -1;
+			slot->skip_idx = 1;
+			slot->status = CRM_SLOT_STATUS_NO_REQ;
+		}
+		in_q->wr_idx = 0;
+		in_q->rd_idx = 0;
+	} else if (flush_info->flush_type ==
+		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			flush_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				flush_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
+				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be cancelled",
+					flush_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		flush_req.link_hdl = flush_info->link_hdl;
+		flush_req.dev_hdl = device->dev_hdl;
+		flush_req.req_id = flush_info->req_id;
+		flush_req.type = flush_info->flush_type;
+		/* @TODO: error return handling from drivers */
+		if (device->ops && device->ops->flush_req)
+			rc = device->ops->flush_req(&flush_req);
+	}
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_sched_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_sched_req(void *priv, void *data)
+{
+	int                               rc = 0;
+	struct cam_req_mgr_sched_request *sched_req = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_req_queue     *in_q = NULL;
+	struct cam_req_mgr_slot          *slot = NULL;
+	struct crm_task_payload          *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld",
+		sched_req->link_hdl,
+		sched_req->req_id);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	slot = &in_q->slot[in_q->wr_idx];
+
+	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+		CRM_ERR("in_q overwrite %d", slot->status);
+		/* @TODO: error handling */
+	}
+	CRM_DBG("sched_req %lld at slot %d",
+		sched_req->req_id, in_q->wr_idx);
+
+	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
+	slot->req_id = sched_req->req_id;
+	slot->skip_idx = 0;
+	slot->recover = sched_req->bubble_enable;
+	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_add_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_add_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx;
+	struct cam_req_mgr_add_request      *add_req = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_req_tbl          *tbl = NULL;
+	struct cam_req_mgr_tbl_slot         *slot = NULL;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	add_req = (struct cam_req_mgr_add_request *)&task_data->u;
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		if (device->dev_hdl == add_req->dev_hdl) {
+			tbl = device->pd_tbl;
+			break;
+		}
+	}
+	if (!tbl) {
+		CRM_ERR("dev_hdl not found %x, %x %x",
+			add_req->dev_hdl,
+			link->l_dev[0].dev_hdl,
+			link->l_dev[1].dev_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	/*
+	 * Go through request table and add
+	 * request id to proper table
+	 * 1. find req slot in in_q matching req_id sent by dev
+	 * 2. goto table of this device based on p_delay
+	 * 3. mark req_ready_map with this dev_bit.
+	 */
+
+	mutex_lock(&link->req.lock);
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -EBADSLT;
+		mutex_unlock(&link->req.lock);
+		goto end;
+	}
+	slot = &tbl->slot[idx];
+	if (slot->state != CRM_REQ_STATE_PENDING &&
+		slot->state != CRM_REQ_STATE_EMPTY) {
+		CRM_WARN("Unexpected state %d for slot %d map %x",
+			slot->state, idx, slot->req_ready_map);
+	}
+
+	slot->state = CRM_REQ_STATE_PENDING;
+	slot->req_ready_map |= (1 << device->dev_bit);
+
+	CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
+		slot->req_ready_map);
+
+	if (slot->req_ready_map == tbl->dev_mask) {
+		CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+			idx, add_req->req_id, tbl->pd);
+		slot->state = CRM_REQ_STATE_READY;
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_error()
+ *
+ * @brief: This runs in workqueue thread context. Performs bubble/err recovery.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_error(void *priv, void *data)
+{
+	int                                  rc = 0, idx = -1, i;
+	struct cam_req_mgr_error_notify     *err_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld error %d",
+		err_info->link_hdl,
+		err_info->req_id,
+		err_info->error);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			err_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				err_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (!slot->recover) {
+				CRM_WARN("err recovery disabled req_id %lld",
+					err_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return 0;
+			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
+			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be recovered %d",
+					err_info->req_id, slot->status);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			/* Notify all devices in the link about error */
+			for (i = 0; i < link->num_devs; i++) {
+				device = &link->l_dev[i];
+				if (device != NULL) {
+					evt_data.dev_hdl = device->dev_hdl;
+					evt_data.evt_type =
+						CAM_REQ_MGR_LINK_EVT_ERR;
+					evt_data.link_hdl =  link->link_hdl;
+					evt_data.req_id = err_info->req_id;
+					evt_data.u.error = err_info->error;
+					if (device->ops &&
+						device->ops->process_evt)
+						rc = device->ops->
+							process_evt(&evt_data);
+				}
+			}
+			/* Bring processing pointer to bubbled req id */
+			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+			in_q->rd_idx = idx;
+			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+			mutex_lock(&link->lock);
+			link->state = CAM_CRM_LINK_STATE_ERR;
+			mutex_unlock(&link->lock);
+		}
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
  * cam_req_mgr_process_sof()
  *
- * @brief: This runs in workque thread context. Call core funcs to check
- * which peding requests can be processed.
- * @data:contains information about frame_id, link etc.
+ * @brief: This runs in workqueue thread context. Calls core funcs to
+ *         check which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
  *
- * Returns 0 on success.
+ * @return: 0 on success.
  */
 static int cam_req_mgr_process_sof(void *priv, void *data)
 {
-	int ret = 0, i = 0;
-	struct cam_req_mgr_sof_notify *sof_data = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
-	struct cam_req_mgr_connected_device *device = NULL;
-	struct cam_req_mgr_apply_request apply_req;
+	int                                  rc = 0;
+	struct cam_req_mgr_sof_notify       *sof_data = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
 		CRM_ERR("input args NULL %pK %pK", data, priv);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
-	sof_data = (struct cam_req_mgr_sof_notify *)data;
+	task_data = (struct crm_task_payload *)data;
+	sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
 
 	CRM_DBG("link_hdl %x frame_id %lld",
 		sof_data->link_hdl,
 		sof_data->frame_id);
 
-	apply_req.link_hdl = sof_data->link_hdl;
-	/* @TODO: go through request table and issue
-	 * request id based on dev status
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	/*
+	 * Check if current read index is in applied state; if yes, make
+	 * it free and increment read index to the next slot.
+	 */
-	apply_req.request_id = sof_data->frame_id;
-	apply_req.report_if_bubble = 0;
+	CRM_DBG("link_hdl %x current idx %d req_status %d",
+		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
-	CRM_DBG("link %pK l_dev %pK num_dev %d",
-		link, link->l_devices, link->num_connections);
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		if (device != NULL) {
-			CRM_DBG("dev_id %d dev_hdl %x ops %pK p_delay %d",
-				device->dev_info.dev_id, device->dev_hdl,
-				device->ops, device->dev_info.p_delay);
-			apply_req.dev_hdl = device->dev_hdl;
-			if (device->ops && device->ops->apply_req) {
-				ret = device->ops->apply_req(&apply_req);
-				/* Error handling for this failure is pending */
-				if (ret < 0)
-					CRM_ERR("Failure:%d dev=%d", ret,
-						device->dev_info.dev_id);
-			}
+	if (link->state == CAM_CRM_LINK_STATE_ERR)
+		CRM_WARN("Error recovery idx %d status %d",
+			in_q->rd_idx,
+			in_q->slot[in_q->rd_idx].status);
 
-		}
+	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
+		/*
+		 * Do NOT reset req q slot data here; it must be
+		 * preserved in order to handle bubble recovery.
+		 */
+		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
 	}
+	rc = __cam_req_mgr_process_req(link);
+	mutex_unlock(&link->req.lock);
 
 end:
-	return ret;
+	return rc;
 }
 
-/**
- * cam_req_mgr_notify_sof()
- *
- * @brief: SOF received from device, sends trigger through workqueue
- * @sof_data: contains information about frame_id, link etc.
- *
- * Returns 0 on success
- */
-static int cam_req_mgr_cb_notify_sof(struct cam_req_mgr_sof_notify *sof_data)
-{
-	int                           ret = 0;
-	struct crm_workq_task        *task = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!sof_data) {
+/* Linked devices' Callback section */
+
+/**
+ * cam_req_mgr_cb_add_req()
+ *
+ * @brief    : Drivers call this function to notify that a new packet is available.
+ * @add_req  : Information about new request available at a device.
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
+{
+	int                             rc = 0, idx;
+	struct crm_workq_task          *task = NULL;
+	struct cam_req_mgr_core_link   *link = NULL;
+	struct cam_req_mgr_add_request *dev_req;
+	struct crm_task_payload        *task_data;
+
+	if (!add_req) {
 		CRM_ERR("sof_data is NULL");
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 
-	CRM_DBG("link_hdl %x frame_id %lld",
-		sof_data->link_hdl,
-		sof_data->frame_id);
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(add_req->link_hdl);
+
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Validate if req id is present in input queue */
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task dev %x req %lld",
+			add_req->dev_hdl, add_req->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+	dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
+	dev_req->req_id = add_req->req_id;
+	dev_req->link_hdl = add_req->link_hdl;
+	dev_req->dev_hdl = add_req->dev_hdl;
+	task->process_cb = &cam_req_mgr_process_add_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_err()
+ *
+ * @brief    : Error received from device, sends bubble recovery
+ * @err_info : contains information about the error that occurred (bubble/overflow)
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_err(
+	struct cam_req_mgr_error_notify *err_info)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_error_notify *notify_err;
+	struct crm_task_payload         *task_data;
+
+	if (!err_info) {
+		CRM_ERR("err_info is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(err_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	crm_timer_reset(link->watchdog);
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task req_id %lld", err_info->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
+	notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
+	notify_err->req_id = err_info->req_id;
+	notify_err->link_hdl = err_info->link_hdl;
+	notify_err->dev_hdl = err_info->dev_hdl;
+	notify_err->error = err_info->error;
+	task->process_cb = &cam_req_mgr_process_error;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_sof()
+ *
+ * @brief   : SOF received from device, sends trigger through workqueue
+ * @sof_data: contains information about frame_id, link etc.
+ *
+ * @return  : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_sof(
+	struct cam_req_mgr_sof_notify *sof_data)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_sof_notify   *notify_sof;
+	struct crm_task_payload         *task_data;
+
+	if (!sof_data) {
+		CRM_ERR("sof_data is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
 
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sof_data->link_hdl);
 	if (!link) {
 		CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
-
 	}
 
+	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
 		CRM_ERR("no empty task frame %lld", sof_data->frame_id);
-		ret = -EBUSY;
+		rc = -EBUSY;
 		goto end;
 	}
-	task->type = CRM_WORKQ_TASK_NOTIFY_SOF;
-	task->u.notify_sof.frame_id = sof_data->frame_id;
-	task->u.notify_sof.link_hdl = sof_data->link_hdl;
-	task->u.notify_sof.dev_hdl = sof_data->dev_hdl;
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
+	notify_sof = (struct cam_req_mgr_sof_notify *)&task_data->u;
+	notify_sof->frame_id = sof_data->frame_id;
+	notify_sof->link_hdl = sof_data->link_hdl;
+	notify_sof->dev_hdl = sof_data->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_sof;
-	task->priv = link;
-	cam_req_mgr_workq_enqueue_task(task);
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
 end:
-	return ret;
+	return rc;
 }
 
+static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
+	.notify_sof = cam_req_mgr_cb_notify_sof,
+	.notify_err = cam_req_mgr_cb_notify_err,
+	.add_req    = cam_req_mgr_cb_add_req,
+};
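+
+/*
+ * Illustrative flow: a device (e.g. IFE) that sees an SOF interrupt
+ * fills a cam_req_mgr_sof_notify and calls crm_cb->notify_sof(); that
+ * lands in cam_req_mgr_cb_notify_sof() above, which enqueues a workq
+ * task later serviced by cam_req_mgr_process_sof().
+ */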
+
 /**
- * cam_req_mgr_pvt_reserve_link()
+ * __cam_req_mgr_setup_link_info()
  *
- * @brief: Reserves one link data struct within session
- * @session: session identifier
+ * @brief     : Sets up input queue, creates pd based tables, and
+ *              communicates with devs connected on this link to set them up.
+ * @link      : pointer to link to setup
+ * @link_info : link_info coming from CSL to prepare link
  *
- * Returns pointer to link reserved
+ * @return    : 0 on success, negative in case of failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_reserve_link(
-	struct cam_req_mgr_core_session *session)
+static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_link_info *link_info)
 {
-	int32_t i;
-	struct cam_req_mgr_core_link *link;
+	int                                     rc = 0, i = 0;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_req_tbl             *pd_tbl;
+	enum cam_pipeline_delay                 max_delay;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
-		return NULL;
-	}
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
+		return -EPERM;
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_state == CAM_CRM_LINK_STATE_AVAILABLE) {
-			link->num_connections = 0;
-			link->max_pipeline_delay = 0;
-			memset(link->req_table, 0,
-				sizeof(struct cam_req_mgr_request_table));
-			link->link_state = CAM_CRM_LINK_STATE_IDLE;
-			spin_unlock(&link->lock);
-			break;
+	mutex_init(&link->req.lock);
+	CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+	link->req.num_tbl = 0;
+
+	rc = __cam_req_mgr_setup_in_q(&link->req);
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&link->lock);
+	max_delay = CAM_PIPELINE_DELAY_0;
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+		/* Using dev hdl, get ops ptr to communicate with device */
+		dev->ops = (struct cam_req_mgr_kmd_ops *)
+			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (!dev->ops ||
+			!dev->ops->get_dev_info ||
+			!dev->ops->link_setup) {
+			CRM_ERR("FATAL: device ops NULL");
+			rc = -ENXIO;
+			goto error;
 		}
-		spin_unlock(&link->lock);
-	}
-	CRM_DBG("Link available (total %d)", session->num_active_links);
-	spin_unlock(&session->lock);
-
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-
-	return link;
-}
-
-/**
- * cam_req_mgr_pvt_create_subdevs()
- *
- * @brief: Create new crm  subdev to link with realtime devices
- * @l_devices: list of subdevs internal to crm
- * @num_dev: num of subdevs to be created for link
- *
- * Returns pointer to allocated list of devices
- */
-static struct cam_req_mgr_connected_device *
-	cam_req_mgr_pvt_create_subdevs(int32_t num_dev)
-{
-	struct cam_req_mgr_connected_device *l_devices;
-
-	l_devices = (struct cam_req_mgr_connected_device *)
-		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
-		GFP_KERNEL);
-	if (!l_devices)
-		CRM_DBG("Insufficient memory %lu",
-			sizeof(struct cam_req_mgr_connected_device) * num_dev);
-
-	return l_devices;
-}
-
-/**
- * cam_req_mgr_pvt_destroy_subdev()
- *
- * @brief: Cleans up the subdevs allocated by crm for link
- * @l_device: pointer to list of subdevs crm created
- *
- * Returns 0 for success
- */
-static int cam_req_mgr_pvt_destroy_subdev(
-	struct cam_req_mgr_connected_device **l_device)
-{
-	int ret = 0;
-
-	if (!(*l_device))
-		ret = -EINVAL;
-	else {
-		kfree(*l_device);
-		*l_device = NULL;
+		dev->dev_hdl = link_info->dev_hdls[i];
+		dev->parent = (void *)link;
+		dev->dev_info.dev_hdl = dev->dev_hdl;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+		CRM_DBG("%x: connected: %s, id %d, delay %d",
+			link_info->session_hdl, dev->dev_info.name,
+			dev->dev_info.dev_id, dev->dev_info.p_delay);
+		if (rc < 0 ||
+			dev->dev_info.p_delay >=
+			CAM_PIPELINE_DELAY_MAX ||
+			dev->dev_info.p_delay <
+			CAM_PIPELINE_DELAY_0) {
+			CRM_ERR("get device info failed");
+			/*
+			 * Ensure an error is propagated even when
+			 * get_dev_info() succeeded but p_delay is out of
+			 * range.
+			 */
+			if (rc >= 0)
+				rc = -ENXIO;
+			goto error;
+		} else {
+			CRM_DBG("%x: connected: %s, delay %d",
+				link_info->session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.p_delay);
+			if (dev->dev_info.p_delay > max_delay)
+				max_delay = dev->dev_info.p_delay;
+		}
 	}
 
-	return ret;
+	link_data.link_enable = 1;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = &cam_req_mgr_ops;
+	link_data.max_delay = max_delay;
+
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+
+		link_data.dev_hdl = dev->dev_hdl;
+		/*
+		 * For each unique pipeline delay, create a request
+		 * tracking table.
+		 */
+		if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
+			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
+				dev->dev_info.p_delay);
+			if (!pd_tbl) {
+				CRM_ERR("pd %d tbl not found",
+					dev->dev_info.p_delay);
+				rc = -ENXIO;
+				goto error;
+			}
+		} else {
+			pd_tbl = __cam_req_mgr_create_pd_tbl(
+				dev->dev_info.p_delay);
+			if (pd_tbl == NULL) {
+				CRM_ERR("create new pd tbl failed");
+				rc = -ENXIO;
+				goto error;
+			}
+			pd_tbl->pd = dev->dev_info.p_delay;
+			link->pd_mask |= (1 << pd_tbl->pd);
+			/*
+			 * Add table to list and also sort list
+			 * from max pd to lowest
+			 */
+			__cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
+		}
+		dev->dev_bit = pd_tbl->dev_count++;
+		dev->pd_tbl = pd_tbl;
+		pd_tbl->dev_mask |= (1 << dev->dev_bit);
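+		/*
+		 * dev_mask holds one bit per device sharing this pd table;
+		 * a tbl slot turns READY once req_ready_map == dev_mask
+		 * (see cam_req_mgr_process_add_req()).
+		 */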
+
+		/* Communicate with dev to establish the link */
+		dev->ops->link_setup(&link_data);
+
+		if (link->max_delay < dev->dev_info.p_delay)
+			link->max_delay = dev->dev_info.p_delay;
+	}
+	link->num_devs = link_info->num_devices;
+
+	/* Assign id for pd tables */
+	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
+
+	/* At start, expect max pd devices, all are in skip state */
+	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+
+	mutex_unlock(&link->lock);
+	return 0;
+
+error:
+	__cam_req_mgr_destroy_link_info(link);
+	return rc;
 }
 
+/* IOCTLs handling section */
 int cam_req_mgr_create_session(
 	struct cam_req_mgr_session_info *ses_info)
 {
-	int ret = 0;
-	int32_t i;
-	int32_t session_hdl;
-	struct cam_req_mgr_core_session *cam_session;
+	int                              rc = 0;
+	int32_t                          session_hdl;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		kzalloc(sizeof(*cam_session), GFP_KERNEL);
 	if (!cam_session) {
-		ret = -ENOMEM;
+		rc = -ENOMEM;
 		goto end;
 	}
 
 	session_hdl = cam_create_session_hdl((void *)cam_session);
 	if (session_hdl < 0) {
 		CRM_ERR("unable to create session_hdl = %x", session_hdl);
-		ret = session_hdl;
-		goto session_hdl_failed;
+		rc = session_hdl;
+		kfree(cam_session);
+		goto end;
 	}
 	ses_info->session_hdl = session_hdl;
+
+	mutex_init(&cam_session->lock);
+	CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+
+	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
-
-	spin_lock_init(&cam_session->lock);
-	cam_session->num_active_links = 0;
-
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		spin_lock_init(&cam_session->links[i].lock);
-		cam_session->links[i].link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-		INIT_LIST_HEAD(&cam_session->links[i].link_head);
-		cam_session->links[i].workq = NULL;
-	}
+	cam_session->num_links = 0;
 	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
-
-	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
-session_hdl_failed:
-	kfree(cam_session);
+	mutex_unlock(&cam_session->lock);
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
+	return rc;
 }
 
 int cam_req_mgr_destroy_session(
 		struct cam_req_mgr_session_info *ses_info)
 {
-	int ret;
-	int32_t i;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link = NULL;
+	int rc;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(ses_info->session_hdl);
-	if (cam_session == NULL) {
+	if (!cam_session) {
 		CRM_ERR("failed to get session priv");
-		ret = -ENOENT;
+		rc = -ENOENT;
 		goto end;
 
 	}
-	spin_lock(&cam_session->lock);
-	for (i = 0; i < cam_session->num_active_links; i++) {
-		link = &cam_session->links[i];
-		CRM_ERR("session %x active_links %d hdl %x connections %d",
+	mutex_lock(&cam_session->lock);
+	if (cam_session->num_links) {
+		CRM_ERR("destroy session %x num_active_links %d",
 			ses_info->session_hdl,
-			cam_session->num_active_links,
-			link->link_hdl, link->num_connections);
+			cam_session->num_links);
+		/* @TODO : Go through active links and destroy ? */
 	}
 	list_del(&cam_session->entry);
-	spin_unlock(&cam_session->lock);
+	mutex_unlock(&cam_session->lock);
+	mutex_destroy(&cam_session->lock);
 	kfree(cam_session);
 
-	ret = cam_destroy_session_hdl(ses_info->session_hdl);
-	if (ret)
-		CRM_ERR("unable to destroy session_hdl = %x ret %d",
-			ses_info->session_hdl, ret);
+	rc = cam_destroy_session_hdl(ses_info->session_hdl);
+	if (rc < 0)
+		CRM_ERR("unable to destroy session_hdl = %x rc %d",
+			ses_info->session_hdl, rc);
 
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
+	return rc;
 }
 
 int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 {
-	int ret = 0;
-	int32_t i, link_hdl;
-	char buf[128];
-	struct cam_create_dev_hdl root_dev;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_core_dev_link_setup link_data;
-	struct cam_req_mgr_connected_device *l_devices;
-	enum cam_pipeline_delay max_delay = CAM_PIPELINE_DELAY_0;
+	int                                     rc = 0;
+	char                                    buf[128];
+	struct cam_create_dev_hdl               root_dev;
+	struct cam_req_mgr_core_session        *cam_session;
+	struct cam_req_mgr_core_link           *link;
 
 	if (!link_info) {
-		CRM_ERR("NULL pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
-
 	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
 		CRM_ERR("Invalid num devices %d", link_info->num_devices);
 		return -EINVAL;
 	}
 
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(link_info->session_hdl);
 	if (!cam_session) {
-		CRM_ERR("NULL session pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_reserve_link(cam_session);
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
 	if (!link) {
-		CRM_ERR("NULL link pointer");
+		CRM_ERR("failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	CRM_DBG("link reserved %pK %x", link, link->link_hdl);
 
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->session_hdl;
 	root_dev.priv = (void *)link;
 
-	link_hdl = cam_create_device_hdl(&root_dev);
-	if (link_hdl < 0) {
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
 		CRM_ERR("Insufficient memory to create new device handle");
-		ret = link_hdl;
+		mutex_unlock(&link->lock);
+		rc = link->link_hdl;
 		goto link_hdl_fail;
 	}
+	mutex_unlock(&link->lock);
+	link_info->link_hdl = link->link_hdl;
 
-	l_devices = cam_req_mgr_pvt_create_subdevs(link_info->num_devices);
-	if (!l_devices) {
-		ret = -ENOMEM;
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->num_devices);
+	if (rc < 0) {
+		CRM_ERR("Insufficient memory to create new crm subdevs");
 		goto create_subdev_failed;
 	}
 
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->get_dev_info) {
-				ret = l_devices[i].ops->get_dev_info(
-					&l_devices[i].dev_info);
-				if (ret < 0 ||
-					l_devices[i].dev_info.p_delay >=
-					CAM_PIPELINE_DELAY_MAX ||
-					l_devices[i].dev_info.p_delay <
-					CAM_PIPELINE_DELAY_0) {
-					CRM_ERR("get device info failed");
-					goto error;
-				} else {
-					CRM_DBG("%x: connected: %s, delay %d",
-						link_info->session_hdl,
-						l_devices[i].dev_info.name,
-						l_devices[i].dev_info.p_delay);
-					if (l_devices[i].dev_info.p_delay >
-						max_delay)
-					max_delay =
-						l_devices[i].dev_info.p_delay;
-				}
-			}
-		} else {
-			CRM_ERR("FATAL: device ops NULL");
-			ret = -ENXIO;
-			goto error;
-		}
-	}
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
 
-	link_data.link_enable = true;
-	link_data.link_hdl = link_hdl;
-	link_data.crm_cb = &cam_req_mgr_ops;
-	link_data.max_delay = max_delay;
-
-	/* After getting info about all devices, establish link */
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->link_setup) {
-				ret = l_devices[i].ops->link_setup(&link_data);
-				if (ret < 0) {
-					/* TODO check handlng of this failure */
-					CRM_ERR("link setup failed");
-					goto error;
-				}
-			}
-		}
-		list_add_tail(&l_devices[i].entry, &link->link_head);
-	}
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	mutex_unlock(&link->lock);
 
 	/* Create worker for current link */
-	snprintf(buf, sizeof(buf), "%x-%x", link_info->session_hdl, link_hdl);
-	ret = cam_req_mgr_workq_create(buf, &link->workq);
-	if (ret < 0) {
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->session_hdl, link->link_hdl);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
-		goto error;
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
 	}
 
-	link_info->link_hdl = link_hdl;
-	spin_lock(&link->lock);
-	link->l_devices = l_devices;
-	link->link_hdl = link_hdl;
-	link->parent = (void *)cam_session;
-	link->num_connections = link_info->num_devices;
-	link->link_state = CAM_CRM_LINK_STATE_READY;
-	spin_unlock(&link->lock);
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links++;
-	spin_unlock(&cam_session->lock);
+	/* Start watchdog timer to detect if camera hw goes into a bad state */
+	rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
+		link, &__cam_req_mgr_sof_freeze);
+	if (rc < 0) {
+		kfree(link->workq->task.pool[0].payload);
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	return ret;
-
-error:
-	cam_req_mgr_pvt_destroy_subdev(&l_devices);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
 create_subdev_failed:
-	cam_destroy_device_hdl(link_hdl);
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->link_hdl = 0;
 link_hdl_fail:
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	spin_unlock(&link->lock);
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	mutex_unlock(&link->lock);
 
-	return ret;
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
 }
 
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
 {
-	int ret = 0;
-	int32_t i = 0;
+	int                              rc = 0;
 	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_connected_device *device;
-	struct cam_req_mgr_core_dev_link_setup link_data;
+	struct cam_req_mgr_core_link    *link;
 
 	if (!unlink_info) {
 		CRM_ERR("NULL pointer");
 		return -EINVAL;
 	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
-	cam_get_device_priv(unlink_info->session_hdl);
+		cam_get_device_priv(unlink_info->session_hdl);
 	if (!cam_session) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_find_link(cam_session,
-		unlink_info->link_hdl);
+	/* link hdl's priv data is core_link struct */
+	link = cam_get_device_priv(unlink_info->link_hdl);
 	if (!link) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	__cam_req_mgr_print_req_tbl(&link->req);
 
-	ret = cam_destroy_device_hdl(link->link_hdl);
-	if (ret < 0) {
-		CRM_ERR("error in destroying dev handle %d %x",
-			ret, link->link_hdl);
-		ret = -EINVAL;
-	}
-	link_data.link_enable = false;
-	link_data.link_hdl = link->link_hdl;
-	link_data.crm_cb = NULL;
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		link_data.dev_hdl = device->dev_hdl;
-		if (device->ops && device->ops->link_setup)
-			device->ops->link_setup(&link_data);
-		device->dev_hdl = 0;
-		device->parent = NULL;
-		device->ops = NULL;
-		list_del(&device->entry);
-	}
-	/* Destroy worker of link */
-	cam_req_mgr_workq_destroy(link->workq);
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	link->parent = NULL;
-	link->num_connections = 0;
-	link->link_hdl = 0;
-	link->workq = NULL;
-	spin_unlock(&link->lock);
+	/* Destroy workq payload data */
+	kfree(link->workq->task.pool[0].payload);
+	link->workq->task.pool[0].payload = NULL;
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links--;
-	spin_unlock(&cam_session->lock);
+	/* Destroy workq and timer of link */
+	crm_timer_exit(&link->watchdog);
 
-	ret = cam_req_mgr_pvt_destroy_subdev(&link->l_devices);
-	if (ret < 0) {
-		CRM_ERR("error while destroying subdev link %x",
-			link_data.link_hdl);
-		ret = -EINVAL;
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Clean up request tables */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle */
+	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
+	if (rc < 0) {
+		CRM_ERR("error while destroying dev handle %d %x",
+			rc, link->link_hdl);
 	}
 
-	return ret;
+	/* Free current link and put it back into session's free pool of links */
+	__cam_req_mgr_unreserve_link(cam_session, &link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+
+	return rc;
 }
 
 int cam_req_mgr_schedule_request(
 			struct cam_req_mgr_sched_request *sched_req)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_core_session  *session = NULL;
+	struct cam_req_mgr_sched_request *sched;
+	struct crm_task_payload          *task_data;
+
 	if (!sched_req) {
-		CRM_ERR("NULL pointer");
+		CRM_ERR("csl_req is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(sched_req->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+		return -EINVAL;
+	}
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	if (!session) {
+		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task)
+		return -ENOMEM;
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	sched->req_id = sched_req->req_id;
+	sched->link_hdl = sched_req->link_hdl;
+	if (session->force_err_recovery == AUTO_RECOVERY) {
+		sched->bubble_enable = sched_req->bubble_enable;
+	} else {
+		sched->bubble_enable =
+		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
+	}
+	task->process_cb = &cam_req_mgr_process_sched_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
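+	/*
+	 * Note: wait_for_completion_timeout() returns remaining jiffies on
+	 * completion and 0 on timeout, not a 0/-errno code.
+	 */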
+end:
+	return rc;
 }
 
-int cam_req_mgr_sync_mode(
-			struct cam_req_mgr_sync_mode *sync_links)
+int cam_req_mgr_sync_link(
+	struct cam_req_mgr_sync_mode *sync_links)
 {
 	if (!sync_links) {
 		CRM_ERR("NULL pointer");
@@ -611,15 +1983,70 @@
 }
 
 int cam_req_mgr_flush_requests(
-			struct cam_req_mgr_flush_info *flush_info)
+	struct cam_req_mgr_flush_info *flush_info)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_flush_info    *flush;
+	struct crm_task_payload          *task_data;
+	struct cam_req_mgr_core_session  *session = NULL;
+
 	if (!flush_info) {
-		CRM_ERR("NULL pointer");
-		return -EINVAL;
+		CRM_ERR("flush req is NULL");
+		rc = -EFAULT;
+		goto end;
+	}
+	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
+		CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+		rc = -EINVAL;
+		goto end;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	/* session hdl's priv data is cam session struct */
+	session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(flush_info->session_hdl);
+	if (!session) {
+		CRM_ERR("Invalid session %x", flush_info->session_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (session->num_links <= 0) {
+		CRM_WARN("No active links in session %x",
+		flush_info->session_hdl);
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(flush_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
+	flush = (struct cam_req_mgr_flush_info *)&task_data->u;
+	flush->req_id = flush_info->req_id;
+	flush->link_hdl = flush_info->link_hdl;
+	flush->flush_type = flush_info->flush_type;
+	task->process_cb = &cam_req_mgr_process_flush_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	return rc;
 }
 
 
@@ -639,6 +2066,7 @@
 	CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
 	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
 	mutex_init(&g_crm_core_dev->crm_lock);
+	cam_req_mgr_debug_register(g_crm_core_dev);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 7679f20..889ee9c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -14,213 +14,344 @@
 
 #include "cam_req_mgr_interface.h"
 #include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_timer.h"
 
-#define CAM_REQ_MGR_MAX_LINKED_DEV 16
+#define CAM_REQ_MGR_MAX_LINKED_DEV     16
+#define MAX_REQ_SLOTS                  48
+
+#define CAM_REQ_MGR_WATCHDOG_TIMEOUT   5000
+#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT  1000
+#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
+
+#define FORCE_DISABLE_RECOVERY  2
+#define FORCE_ENABLE_RECOVERY   1
+#define AUTO_RECOVERY           0
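+/*
+ * force_err_recovery policy used by cam_req_mgr_schedule_request():
+ * AUTO_RECOVERY honors the request's bubble_enable flag, while the
+ * FORCE_* values override it.
+ */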
+
+#define CRM_WORKQ_NUM_TASKS 30
 
 /**
- * enum crm_req_status
- * State machine for life cycle of request in link
- * EMPTY - indicates req slot is empty
- * PENDING - indicates req slot is waiting for reqs from all devs
- * READY - indicates req slot is ready to be sent to devs
- * APPLIED - indicates req slot is sent to devices
- * INVALID - indicates req slot is not in valid state
+ * enum crm_workq_task_type
+ * @codes: to identify which type of task is present
  */
-enum crm_req_status {
-	CRM_REQ_STATUS_EMPTY,
-	CRM_REQ_STATUS_PENDING,
-	CRM_REQ_STATUS_READY,
-	CRM_REQ_STATUS_APPLIED,
-	CRM_REQ_STATUS_INVALID,
+enum crm_workq_task_type {
+	CRM_WORKQ_TASK_GET_DEV_INFO,
+	CRM_WORKQ_TASK_SETUP_LINK,
+	CRM_WORKQ_TASK_DEV_ADD_REQ,
+	CRM_WORKQ_TASK_APPLY_REQ,
+	CRM_WORKQ_TASK_NOTIFY_SOF,
+	CRM_WORKQ_TASK_NOTIFY_ERR,
+	CRM_WORKQ_TASK_SCHED_REQ,
+	CRM_WORKQ_TASK_FLUSH_REQ,
+	CRM_WORKQ_TASK_INVALID,
+};
+
+/**
+ * struct crm_task_payload
+ * @type       : to identify which type of task is present
+ * @u          : union of payload of all types of tasks supported
+ * @sched_req  : contains info of incoming request from CSL to CRM
+ * @flush_info : contains info of cancelled request
+ * @dev_req    : contains tracking info of available req id at device
+ * @send_req   : contains info of apply settings to be sent to devs in link
+ * @apply_req  : contains info of which request is applied at device
+ * @notify_sof : contains notification from IFE to CRM about SOF trigger
+ * @notify_err : contains error info happened while processing request
+ * -
+ */
+struct crm_task_payload {
+	enum crm_workq_task_type type;
+	union {
+		struct cam_req_mgr_sched_request        sched_req;
+		struct cam_req_mgr_flush_info           flush_info;
+		struct cam_req_mgr_add_request          dev_req;
+		struct cam_req_mgr_send_request         send_req;
+		struct cam_req_mgr_sof_notify           notify_sof;
+		struct cam_req_mgr_error_notify         notify_err;
+	} u;
+};
+
+/**
+ * enum crm_req_state
+ * State machine for life cycle of request in pd table
+ * EMPTY   : indicates req slot is empty
+ * PENDING : indicates req slot is waiting for reqs from all devs
+ * READY   : indicates req slot is ready to be sent to devs
+ * INVALID : indicates req slot is not in valid state
+ */
+enum crm_req_state {
+	CRM_REQ_STATE_EMPTY,
+	CRM_REQ_STATE_PENDING,
+	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_INVALID,
+};
+
+/**
+ * enum crm_slot_status
+ * State machine for life cycle of request in input queue
+ * NO_REQ     : empty slot
+ * REQ_ADDED  : new entry in slot
+ * REQ_PENDING : waiting for requests from all linked devices
+ * APPLIED    : req is sent to devices
+ * INVALID    : invalid state
+ */
+enum crm_slot_status {
+	CRM_SLOT_STATUS_NO_REQ,
+	CRM_SLOT_STATUS_REQ_ADDED,
+	CRM_SLOT_STATUS_REQ_PENDING,
+	CRM_SLOT_STATUS_REQ_APPLIED,
+	CRM_SLOT_STATUS_INVALID,
 };
 
 /**
  * enum cam_req_mgr_link_state
  * State machine for life cycle of link in crm
- * AVAILABLE - indicates link is not in use
- * IDLE - indicates link is reserved but not initialized
- * READY - indicates link is initialized and ready for operation
- * STREAMING - indicates link is receiving triggers and requests
- * BUBBLE_DETECTED - indicates device on link is in bad shape
- * ROLLBACK_STARTED - indicates link had triggered error recovery
- * MAX - indicates link max as invalid
+ * AVAILABLE  : link available
+ * IDLE       : link initialized but not ready yet
+ * READY      : link is ready for use
+ * ERR        : link has encountered error
+ * MAX        : invalid state
  */
 enum cam_req_mgr_link_state {
 	CAM_CRM_LINK_STATE_AVAILABLE,
 	CAM_CRM_LINK_STATE_IDLE,
 	CAM_CRM_LINK_STATE_READY,
-	CAM_CRM_LINK_STATE_STREAMING,
-	CAM_CRM_LINK_STATE_BUBBLE_DETECTED,
-	CAM_CRM_LINK_STATE_ROLLBACK_STARTED,
-	CAM_CRM_LINK_STATE_DEVICE_STATE_MAX,
+	CAM_CRM_LINK_STATE_ERR,
+	CAM_CRM_LINK_STATE_MAX,
 };
 
 /**
- * struct cam_req_mgr_request_slot
- * @idx: device handle
- * @req_status: state machine for life cycle of a request
- * @request_id: request id value
+ * struct cam_req_mgr_traverse
+ * @idx        : slot index
+ * @result     : bitmask of tables that applied their settings successfully
+ * @tbl        : pointer of pipeline delay based request table
+ * @apply_data : pointer which various tables will update during traverse
+ * @in_q       : input request queue pointer
  */
-struct cam_req_mgr_request_slot {
+struct cam_req_mgr_traverse {
+	int32_t                       idx;
+	uint32_t                      result;
+	struct cam_req_mgr_req_tbl   *tbl;
+	struct cam_req_mgr_apply     *apply_data;
+	struct cam_req_mgr_req_queue *in_q;
+};
+
+/**
+ * struct cam_req_mgr_apply
+ * @idx      : corresponding input queue slot index
+ * @pd       : pipeline delay of device
+ * @req_id   : req id for dev with above pd to process
+ * @skip_idx : skip applying settings when this is set.
+ */
+struct cam_req_mgr_apply {
 	int32_t idx;
-	enum crm_req_status req_status;
-	int64_t request_id;
+	int32_t pd;
+	int64_t req_id;
+	int32_t skip_idx;
 };
 
 /**
- * struct cam_req_mgr_request_queue
- * @read_index: idx currently being processed
- * @write_index: idx at which incoming req is stored
- * @num_slots: num of req slots i.e. queue depth
- * @req_slot: slots which hold the request info
+ * struct cam_req_mgr_tbl_slot
+ * @idx           : slot index
+ * @req_ready_map : mask tracking which all devices have request ready
+ * @state         : state machine for life cycle of a slot
  */
-struct cam_req_mgr_request_queue {
-	int32_t read_index;
-	int32_t write_index;
-	uint32_t num_slots;
-	struct cam_req_mgr_request_slot *req_slot;
+struct cam_req_mgr_tbl_slot {
+	int32_t             idx;
+	uint32_t            req_ready_map;
+	enum crm_req_state  state;
 };
 
 /**
- * struct cam_req_mgr_frame_settings
- * @request_id: request id to apply
- * @frame_id: frame id for debug purpose
+ * struct cam_req_mgr_req_tbl
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : number of traverses this table should drop, e.g. at
+ *              stream start or during bubble recovery
+ * @next          : pointer to next pipeline delay request table
+ * @pd_delta      : difference between this table's pipeline delay and next
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
  */
-struct cam_req_mgr_frame_settings {
-	int64_t request_id;
-	int64_t frame_id;
+struct cam_req_mgr_req_tbl {
+	int32_t                     id;
+	int32_t                     pd;
+	int32_t                     dev_count;
+	int32_t                     dev_mask;
+	int32_t                     skip_traverse;
+	struct cam_req_mgr_req_tbl *next;
+	int32_t                     pd_delta;
+	int32_t                     num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
 };
 
 /**
- * struct cam_req_mgr_request_table
- * @pipeline_delay: pipeline delay of this req table
- * @l_devices: list of devices belonging to this p_delay
- * @dev_mask: each dev hdl has unique bit assigned, dev mask tracks if all devs
- *  received req id packet from UMD to process
+ * struct cam_req_mgr_slot
+ * - Internal Book keeping
+ * @idx      : slot index
+ * @skip_idx : if req id in this slot needs to be skipped/not applied
+ * @status   : state machine for life cycle of a slot
+ * - members updated due to external events
+ * @recover  : if user enabled recovery for this request.
+ * @req_id   : request id value held in this slot
  */
-struct cam_req_mgr_request_table {
-	uint32_t pipeline_delay;
-	struct list_head l_devices;
-	uint32_t dev_mask;
+struct cam_req_mgr_slot {
+	int32_t               idx;
+	int32_t               skip_idx;
+	enum crm_slot_status  status;
+	int32_t               recover;
+	int64_t               req_id;
+};
+
+/**
+ * struct cam_req_mgr_req_queue
+ * @num_slots   : max num of input queue slots
+ * @slot        : request slot holding incoming request id and bubble info.
+ * @rd_idx      : indicates slot index currently in process.
+ * @wr_idx      : indicates slot index to hold new upcoming req.
+ */
+struct cam_req_mgr_req_queue {
+	int32_t                     num_slots;
+	struct cam_req_mgr_slot     slot[MAX_REQ_SLOTS];
+	int32_t                     rd_idx;
+	int32_t                     wr_idx;
+};
+
+/**
+ * struct cam_req_mgr_req_data
+ * @in_q        : pointer to input request queue
+ * @l_tbl       : unique pd request tables.
+ * @num_tbl     : how many unique pd value devices are present
+ * @apply_data  : holds the request id to apply at each pipeline delay
+ * @lock        : mutex lock protecting request data ops.
+ */
+struct cam_req_mgr_req_data {
+	struct cam_req_mgr_req_queue *in_q;
+	struct cam_req_mgr_req_tbl   *l_tbl;
+	int32_t                       num_tbl;
+	struct cam_req_mgr_apply      apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct mutex                  lock;
 };
 
 /**
  * struct cam_req_mgr_connected_device
- *- Device Properties
- * @dev_hdl: device handle
- * @dev_bit: unique bit assigned to device in link
- * -Device progress status
- * @available_req_id: tracks latest available req id at this device
- * @processing_req_id: tracks request id currently processed
+ * - Device Properties
+ * @dev_hdl  : device handle
+ * @dev_bit  : unique bit assigned to device in link
  * - Device characteristics
- * @dev_info: holds dev characteristics such as pipeline delay, dev name
- * @ops: holds func pointer to call methods on this device
- * @parent: pvt data - Pointer to parent link device its connected with
- * @entry: entry to the list of connected devices in link
+ * @pd_tbl   : pointer to the pipeline delay table this device belongs to
+ * @dev_info : holds dev characteristics such as pipeline delay, dev name
+ * @ops      : holds func pointer to call methods on this device
+ * @parent   : pvt data - like link which this dev hdl belongs to
  */
 struct cam_req_mgr_connected_device {
-	int32_t dev_hdl;
-	int64_t dev_bit;
-	int64_t available_req_id;
-	int64_t processing_req_id;
-	struct cam_req_mgr_device_info dev_info;
-	struct cam_req_mgr_kmd_ops *ops;
-	void *parent;
-	struct list_head entry;
+	int32_t                         dev_hdl;
+	int64_t                         dev_bit;
+	struct cam_req_mgr_req_tbl     *pd_tbl;
+	struct cam_req_mgr_device_info  dev_info;
+	struct cam_req_mgr_kmd_ops     *ops;
+	void                           *parent;
 };
 
 /**
  * struct cam_req_mgr_core_link
- * - Link Properties
- * @link_hdl: Link identifier
- * @num_connections: num of connected devices to this link
- * @max_pipeline_delay: Max of pipeline delay of all connected devs
- * - Input request queue
- * @in_requests: Queue to hold incoming request hints from CSL
- * @workq: Pointer to handle workq related jobs
+ * -  Link Properties
+ * @link_hdl       : Link identifier
+ * @num_devs       : num of connected devices to this link
+ * @max_delay      : Max of pipeline delay of all connected devs
+ * @workq          : Pointer to handle workq related jobs
+ * @pd_mask        : each set bit indicates the device with pd equal to bit
+ *                   position is available.
  * - List of connected devices
- * @l_devices: List of connected devices to this link
- * @fs_list: Holds the request id which each device in link will consume.
- * @req_table: table to keep track of req ids recived at each dev handle
+ * @l_dev          : List of connected devices to this link
+ * - Request handling data struct
+ * @req            : req data holder.
+ * - Timer
+ * @watchdog       : watchdog timer to recover from sof freeze
  * - Link private data
- * @link_state: link state cycle
- * @parent: pvt data - like session info
- * @link_head: List head of connected devices
- * @lock: spin lock to guard link data operations
+ * @workq_comp     : completion used to block the user thread until the
+ *                   workq finishes schedule request processing
+ * @state          : link state machine
+ * @parent         : pvt data - link's parent is session
+ * @lock           : mutex lock to guard link data operations
  */
 struct cam_req_mgr_core_link {
-	int32_t link_hdl;
-	int32_t num_connections;
-	enum cam_pipeline_delay max_pipeline_delay;
-	struct cam_req_mgr_request_queue in_requests;
-	struct cam_req_mgr_core_workq *workq;
-	struct cam_req_mgr_connected_device *l_devices;
-	struct cam_req_mgr_frame_settings fs_list[CAM_REQ_MGR_MAX_LINKED_DEV];
-	struct cam_req_mgr_request_table req_table[CAM_PIPELINE_DELAY_MAX];
-	enum cam_req_mgr_link_state link_state;
-	void *parent;
-	struct list_head link_head;
-	spinlock_t lock;
+	int32_t                              link_hdl;
+	int32_t                              num_devs;
+	enum cam_pipeline_delay              max_delay;
+	struct cam_req_mgr_core_workq       *workq;
+	int32_t                              pd_mask;
+	struct cam_req_mgr_connected_device *l_dev;
+	struct cam_req_mgr_req_data          req;
+	struct cam_req_mgr_timer            *watchdog;
+	struct completion                    workq_comp;
+	enum cam_req_mgr_link_state          state;
+	void                                *parent;
+	struct mutex                         lock;
 };
 
 /**
  * struct cam_req_mgr_core_session
  * - Session Properties
- * @session_hdl: session identifier
- * @num_active_links: num of active links for current session
+ * @session_hdl        : session identifier
+ * @num_links          : num of active links for current session
  * - Links of this session
- * @links: pointer to array of links within session
+ * @links              : pointer to array of links within session
+ * @in_q               : Input request queue one per session
  * - Session private data
- * @entry: pvt data - entry in the list of sessions
- * @lock: pvt data - spin lock to guard session data
+ * @entry              : pvt data - entry in the list of sessions
+ * @lock               : pvt data - mutex to guard session data
+ * - Debug data
+ * @force_err_recovery : For debugging, we can force bubble recovery
+ *                       to be always ON or always OFF using debugfs.
  */
 struct cam_req_mgr_core_session {
-	int32_t session_hdl;
-	uint32_t num_active_links;
-	struct cam_req_mgr_core_link links[MAX_LINKS_PER_SESSION];
-	struct list_head entry;
-	spinlock_t lock;
+	int32_t                       session_hdl;
+	uint32_t                      num_links;
+	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
+	struct cam_req_mgr_req_queue  in_q;
+	struct list_head              entry;
+	struct mutex                  lock;
+	int32_t                       force_err_recovery;
 };
 
 /**
  * struct cam_req_mgr_core_device
  * - Core camera request manager data struct
- * @session_head: list head holding sessions
- * @crm_lock: mutex lock to protect session creation & destruction
+ * @session_head : list head holding sessions
+ * @crm_lock     : mutex lock to protect session creation & destruction
  */
 struct cam_req_mgr_core_device {
-	struct list_head session_head;
-	struct mutex crm_lock;
+	struct list_head             session_head;
+	struct mutex                 crm_lock;
 };
 
-/* cam_req_mgr_dev to cam_req_mgr_core internal functions */
 /**
  * cam_req_mgr_create_session()
- * @brief: creates session
- * @ses_info: output param for session handle
+ * @brief    : creates session
+ * @ses_info : output param for session handle
  *
- * Called as part of session creation.
+ * Called as part of session creation.
  */
-int cam_req_mgr_create_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_destroy_session()
- * @brief: destroy session
- * @ses_info: session handle info, input param
+ * @brief    : destroy session
+ * @ses_info : session handle info, input param
  *
  * Called as part of session destroy
  * return success/failure
  */
-int cam_req_mgr_destroy_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_link()
- * @brief: creates a link for a session
- * @link_info: handle and session info to create a link
+ * @brief     : creates a link for a session
+ * @link_info : handle and session info to create a link
  *
- * Link is formed in a session for multiple devices. It creates
+ * Link is formed in a session for multiple devices. It creates
  * a unique link handle for the link and is specific to a
  * session. Returns link handle
  */
@@ -228,10 +359,10 @@
 
 /**
  * cam_req_mgr_unlink()
- * @brief: destroy a link in a session
- * @unlink_info: session and link handle info
+ * @brief       : destroy a link in a session
+ * @unlink_info : session and link handle info
  *
- * Link is destroyed in a session
+ * Link is destroyed in a session
  */
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info);
 
@@ -244,11 +375,11 @@
 	struct cam_req_mgr_sched_request *sched_req);
 
 /**
- * cam_req_mgr_sync_mode()
+ * cam_req_mgr_sync_link()
  * @brief: sync for links in a session
  * @sync_links: session, links info and master link info
  */
-int cam_req_mgr_sync_mode(struct cam_req_mgr_sync_mode *sync_links);
+int cam_req_mgr_sync_link(struct cam_req_mgr_sync_mode *sync_links);
 
 /**
  * cam_req_mgr_flush_requests()
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index cf2fe7f..2a831e8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -15,6 +15,16 @@
 #define CRM_TRACE_ENABLE 0
 #define CRM_DEBUG_MUTEX 0
 
+#define SET_SUCCESS_BIT(ret, pd)	{\
+	(ret) |= (1 << (pd));	\
+	}
+
+#define SET_FAILURE_BIT(ret, pd)	{\
+	(ret) &= ~(1 << (pd));	\
+	}
+
+#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
+
 #if (CRM_TRACE_ENABLE == 1)
 	#define CRM_DBG(fmt, args...) do { \
 	trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
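The two bit helpers above record per-pipeline-delay traverse results; with SET_FAILURE_BIT written as ~(1 << pd), only the failing delay's bit is cleared rather than the whole mask. A small illustration (result is a hypothetical local; pd values come from enum cam_pipeline_delay):

    uint32_t result = 0;

    SET_SUCCESS_BIT(result, CAM_PIPELINE_DELAY_1);  /* result == 0x2 */
    SET_SUCCESS_BIT(result, CAM_PIPELINE_DELAY_2);  /* result == 0x6 */
    SET_FAILURE_BIT(result, CAM_PIPELINE_DELAY_1);  /* result == 0x4 */

    if (result & (1 << CAM_PIPELINE_DELAY_2))
        ;  /* delay-2 table applied successfully */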
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
new file mode 100644
index 0000000..19833d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_debug.h"
+
+#define MAX_SESS_INFO_LINE_BUFF_LEN 256
+
+static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+
+static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
+{
+	struct cam_req_mgr_core_device  *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+	int rc = 0;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			session->force_err_recovery = val;
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return rc;
+}
+
+static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val)
+{
+	struct cam_req_mgr_core_device *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		session = list_first_entry(&core_dev->session_head,
+			struct cam_req_mgr_core_session,
+			entry);
+		*val = session->force_err_recovery;
+	}
+	mutex_unlock(&core_dev->crm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
+	cam_req_mgr_debug_set_bubble_recovery, "%lld\n");
+
+static int session_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t session_info_read(struct file *t_file, char *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = sess_info_buffer;
+	char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
+	struct cam_req_mgr_core_device *core_dev =
+		(struct cam_req_mgr_core_device *) t_file->private_data;
+	struct cam_req_mgr_core_session *session;
+
+	memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			snprintf(line_buffer, sizeof(line_buffer),
+				"session_hdl = %x \t"
+				"num_links = %d\n",
+				session->session_hdl, session->num_links);
+			strlcat(out_buffer, line_buffer,
+				sizeof(sess_info_buffer));
+			for (i = 0; i < session->num_links; i++) {
+				snprintf(line_buffer, sizeof(line_buffer),
+					"link_hdl[%d] = 0x%x, num_devs connected = %d\n",
+					i, session->links[i]->link_hdl,
+					session->links[i]->num_devs);
+				strlcat(out_buffer, line_buffer,
+					sizeof(sess_info_buffer));
+			}
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t session_info_write(struct file *t_file,
+	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	/* report all bytes consumed so writers do not retry forever */
+	return t_size_t;
+}
+
+static const struct file_operations session_info = {
+	.open = session_info_open,
+	.read = session_info_read,
+	.write = session_info_write,
+};
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
+{
+	struct dentry *debugfs_root;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "cam_req_mgr");
+	debugfs_root = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("sessions_info", 0644,
+		debugfs_root, core_dev, &session_info))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bubble_recovery", 0644,
+		debugfs_root, core_dev, &bubble_recovery))
+		return -ENOMEM;
+
+	return 0;
+}
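The bubble_recovery node pushes the written value into every session's force_err_recovery, which core code can consult when deciding whether to run recovery. The patch does not show the consumer, but a plausible gate, using the FORCE_*/AUTO_RECOVERY constants from cam_req_mgr_core.h, would look like:

    /* Hypothetical helper - not part of this patch */
    static bool crm_should_recover(
        struct cam_req_mgr_core_session *session, int32_t slot_recover)
    {
        if (session->force_err_recovery == FORCE_ENABLE_RECOVERY)
            return true;    /* debugfs forces recovery on */
        if (session->force_err_recovery == FORCE_DISABLE_RECOVERY)
            return false;   /* debugfs forces recovery off */
        /* AUTO_RECOVERY: honor the per-request recover flag */
        return slot_recover != 0;
    }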
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
new file mode 100644
index 0000000..82ac764
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_DEBUG_H_
+#define _CAM_REQ_MGR_DEBUG_H_
+
+#include <linux/debugfs.h>
+#include "cam_req_mgr_core.h"
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 43b020c6..13affe9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -326,7 +326,7 @@
 			return -EFAULT;
 		}
 
-		rc = cam_req_mgr_sync_mode(&sync_mode);
+		rc = cam_req_mgr_sync_link(&sync_mode);
 		}
 		break;
 	case CAM_REQ_MGR_ALLOC_BUF: {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 174a725..91860f6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -18,15 +18,14 @@
 #include "cam_req_mgr_core_defs.h"
 #include "cam_req_mgr_util.h"
 
-/* Forward declarations */
 struct cam_req_mgr_sof_notify;
 struct cam_req_mgr_error_notify;
 struct cam_req_mgr_add_request;
 struct cam_req_mgr_device_info;
 struct cam_req_mgr_core_dev_link_setup;
 struct cam_req_mgr_apply_request;
-
-/*Ops table for req mgr - kmd communication */
+struct cam_req_mgr_flush_request;
+struct cam_req_mgr_link_evt_data;
 
 /* Request Manager -- camera device driver interface */
 /**
@@ -44,21 +43,25 @@
  * @brief: cam req mgr to camera device drivers
  *
  * @cam_req_mgr_get_dev_info: to fetch details about device linked
- * @cam_req_mgr_link_setup: to establish link with device for a session
- * @cam_req_mgr_notify_err: to broadcast error happened on link for request id
- * @cam_req_mgr_apply_req: CRM asks device to apply certain request id.
+ * @cam_req_mgr_link_setup  : to establish link with device for a session
+ * @cam_req_mgr_notify_err  : to broadcast error happened on link for request id
+ * @cam_req_mgr_apply_req   : CRM asks device to apply certain request id.
+ * @cam_req_mgr_flush_req   : Flush or cancel request
+ * @cam_req_mgr_process_evt : to process generic link events
  */
 typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
 typedef int (*cam_req_mgr_link_setup)(
 	struct cam_req_mgr_core_dev_link_setup *);
 typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *);
+typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
+typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
 
 /**
- * @brief: cam_req_mgr_crm_cb - func table
+ * @brief      : cam_req_mgr_crm_cb - func table
  *
- * @notify_sof: payload for sof indication event
- * @notify_err: payload for different error occurred at device
- * @add_req: pauload to inform which device and what request is received
+ * @notify_sof : payload for sof indication event
+ * @notify_err : payload for different error occurred at device
+ * @add_req    : payload to inform which device and what request is received
  */
 struct cam_req_mgr_crm_cb {
 	cam_req_mgr_notify_sof  notify_sof;
@@ -67,26 +70,30 @@
 };
 
 /**
- * @brief: cam_req_mgr_kmd_ops - func table
+ * @brief        : cam_req_mgr_kmd_ops - func table
  *
- * @get_dev_info: payload to fetch device details
- * @link_setup: payload to establish link with device
- * @apply_req: payload to apply request id on a device linked
+ * @get_dev_info : payload to fetch device details
+ * @link_setup   : payload to establish link with device
+ * @apply_req    : payload to apply request id on a device linked
+ * @flush_req    : payload to flush request
+ * @process_evt  : payload for generic link events
  */
 struct cam_req_mgr_kmd_ops {
 	cam_req_mgr_get_dev_info      get_dev_info;
 	cam_req_mgr_link_setup        link_setup;
 	cam_req_mgr_apply_req         apply_req;
+	cam_req_mgr_flush_req         flush_req;
+	cam_req_mgr_process_evt       process_evt;
 };
 
 /**
  * enum cam_pipeline_delay
- * @brief: enumerator for different pipeline delays in camera
+ * @brief     : enumerator for different pipeline delays in camera
  *
- * @DELAY_0: device processed settings on same frame
- * @DELAY_1: device processed settings after 1 frame
- * @DELAY_2: device processed settings after 2 frames
- * @DELAY_MAX: maximum supported pipeline delay
+ * @DELAY_0   : device processed settings on same frame
+ * @DELAY_1   : device processed settings after 1 frame
+ * @DELAY_2   : device processed settings after 2 frames
+ * @DELAY_MAX : maximum supported pipeline delay
  */
 enum cam_pipeline_delay {
 	CAM_PIPELINE_DELAY_0,
@@ -97,11 +104,11 @@
 
 /**
  * enum cam_req_status
- * @brief: enumerator for request status
+ * @brief   : enumerator for request status
  *
- * @SUCCESS: device processed settings successfully
- * @FAILED: device processed settings failed
- * @MAX: invalid status value
+ * @SUCCESS : device processed settings successfully
+ * @FAILED  : device processed settings failed
+ * @MAX     : invalid status value
  */
 enum cam_req_status {
 	CAM_REQ_STATUS_SUCCESS,
@@ -111,15 +118,15 @@
 
 /**
  * enum cam_req_mgr_device_error
- * @brief: enumerator for different errors occurred at device
+ * @brief      : enumerator for different errors occurred at device
  *
- * @NOT_FOUND: settings asked by request manager is not found
- * @BUBBLE: device hit timing issue and is able to recover
- * @FATAL: device is in bad shape and can not recover from error
- * @PAGE_FAULT: Page fault while accessing memory
- * @OVERFLOW: Bus Overflow for IFE/VFE
- * @TIMEOUT: Timeout from cci or bus.
- * @MAX: Invalid error value
+ * @NOT_FOUND  : settings asked by request manager is not found
+ * @BUBBLE     : device hit timing issue and is able to recover
+ * @FATAL      : device is in bad shape and can not recover from error
+ * @PAGE_FAULT : Page fault while accessing memory
+ * @OVERFLOW   : Bus Overflow for IFE/VFE
+ * @TIMEOUT    : Timeout from cci or bus.
+ * @MAX        : Invalid error value
  */
 enum cam_req_mgr_device_error {
 	CRM_KMD_ERR_NOT_FOUND,
@@ -133,17 +140,17 @@
 
 /**
  * enum cam_req_mgr_device_id
- * @brief: enumerator for different devices in subsystem
+ * @brief       : enumerator for different devices in subsystem
  *
- * @CAM_REQ_MGR: request manager itself
- * @SENSOR: sensor device
- * @FLASH: LED flash or dual LED device
- * @ACTUATOR: lens mover
- * @IFE: Image processing device
- * @EXTERNAL_1: third party device
- * @EXTERNAL_2: third party device
- * @EXTERNAL_3: third party device
- * @MAX: invalid device id
+ * @CAM_REQ_MGR : request manager itself
+ * @SENSOR      : sensor device
+ * @FLASH       : LED flash or dual LED device
+ * @ACTUATOR    : lens mover
+ * @IFE         : Image processing device
+ * @EXTERNAL_1  : third party device
+ * @EXTERNAL_2  : third party device
+ * @EXTERNAL_3  : third party device
+ * @MAX         : invalid device id
  */
 enum cam_req_mgr_device_id {
 	CAM_REQ_MGR_DEVICE,
@@ -158,11 +165,22 @@
 };
 
 /* Camera device driver to Req Mgr device interface */
+
+/**
+ * enum cam_req_mgr_link_evt_type
+ * @CAM_REQ_MGR_LINK_EVT_ERR : error reported on the link
+ * @CAM_REQ_MGR_LINK_EVT_MAX : invalid event type
+ */
+enum cam_req_mgr_link_evt_type {
+	CAM_REQ_MGR_LINK_EVT_ERR,
+	CAM_REQ_MGR_LINK_EVT_MAX,
+};
+
 /**
  * struct cam_req_mgr_sof_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @frame_id: frame id for internal tracking
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @frame_id : frame id for internal tracking
  */
 struct cam_req_mgr_sof_notify {
 	int32_t link_hdl;
@@ -172,11 +190,10 @@
 
 /**
  * struct cam_req_mgr_error_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which hit error
- * @error: what error device hit while processing this req
- *
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which hit error
+ * @error    : what error device hit while processing this req
  */
 struct cam_req_mgr_error_notify {
 	int32_t link_hdl;
@@ -187,9 +204,9 @@
 
 /**
  * struct cam_req_mgr_add_request
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which device is ready to process
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which device is ready to process
  *
  */
 struct cam_req_mgr_add_request {
@@ -202,48 +219,91 @@
 /* CRM to KMD devices */
 /**
  * struct cam_req_mgr_device_info
- * @dev_hdl: Input_param : device handle for reference
- * @name: link link or unlink
- * @dev_id: device id info
- * @p_delay: delay between time settings applied and take effect
+ * @dev_hdl : Input_param : device handle for reference
+ * @name    : device name
+ * @dev_id  : device id info
+ * @p_delay : delay between time settings applied and take effect
  *
  */
 struct cam_req_mgr_device_info {
-	int32_t dev_hdl;
-	char name[256];
-	enum cam_req_mgr_device_id dev_id;
-	enum cam_pipeline_delay p_delay;
+	int32_t                     dev_hdl;
+	char                        name[256];
+	enum cam_req_mgr_device_id  dev_id;
+	enum cam_pipeline_delay     p_delay;
 };
 
 /**
  * struct cam_req_mgr_core_dev_link_setup
- * @link_enable: link link or unlink
- * @link_hdl: link identifier
- * @dev_hdl: device handle for reference
- * @max_delay: max pipeline delay on this link
- * @crm_cb: callback funcs to communicate with req mgr
+ * @link_enable : link link or unlink
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for reference
+ * @max_delay   : max pipeline delay on this link
+ * @crm_cb      : callback funcs to communicate with req mgr
  *
  */
 struct cam_req_mgr_core_dev_link_setup {
-	bool link_enable;
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	enum cam_pipeline_delay max_delay;
+	int32_t                    link_enable;
+	int32_t                    link_hdl;
+	int32_t                    dev_hdl;
+	enum cam_pipeline_delay    max_delay;
 	struct cam_req_mgr_crm_cb *crm_cb;
 };
 
 /**
  * struct cam_req_mgr_apply_request
- * @link_id: link identifier
- * @dev_hdl: device handle for cross check
- * @request_id: request id settings to apply
- * @report_if_bubble: report to crm if failure in applying
+ * @link_hdl         : link identifier
+ * @dev_hdl          : device handle for cross check
+ * @request_id       : request id settings to apply
+ * @report_if_bubble : report to crm if failure in applying
  *
  */
 struct cam_req_mgr_apply_request {
+	int32_t    link_hdl;
+	int32_t    dev_hdl;
+	int64_t    request_id;
+	int32_t    report_if_bubble;
+};
+
+/**
+ * struct cam_req_mgr_flush_request
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for cross check
+ * @type        : flush type - flush all requests or a specific request
+ * @req_id      : request id to cancel
+ *
+ */
+struct cam_req_mgr_flush_request {
+	int32_t     link_hdl;
+	int32_t     dev_hdl;
+	uint32_t    type;
+	int64_t     req_id;
+};
+
+/**
+ * struct cam_req_mgr_link_evt_data
+ * @link_hdl : link handle
+ * @dev_hdl  : device handle
+ * @req_id   : request id
+ * @evt_type : link event type
+ * @error    : error code, valid when evt_type is EVT_ERR
+ *
+ */
+struct cam_req_mgr_link_evt_data {
 	int32_t link_hdl;
 	int32_t dev_hdl;
-	int64_t request_id;
-	int32_t report_if_bubble;
+	int64_t req_id;
+
+	enum cam_req_mgr_link_evt_type evt_type;
+	union {
+		enum cam_req_mgr_device_error error;
+	} u;
+};
+
+/**
+ * struct cam_req_mgr_send_request
+ * @link_hdl   : link identifier
+ * @idx        : slot idx
+ *
+ */
+struct cam_req_mgr_send_request {
+	int32_t    link_hdl;
+	struct cam_req_mgr_req_queue *in_q;
 };
 #endif
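A driver adopting the widened ops table fills in the two new callbacks alongside the existing ones; the sketch below is illustrative only, and every my_dev_* symbol is a placeholder, not something defined by this patch:

    static int my_dev_flush_req(struct cam_req_mgr_flush_request *flush)
    {
        /* drop settings queued for flush->req_id on flush->dev_hdl,
         * or everything when flush->type says flush-all
         */
        return 0;
    }

    static int my_dev_process_evt(struct cam_req_mgr_link_evt_data *evt)
    {
        if (evt->evt_type == CAM_REQ_MGR_LINK_EVT_ERR)
            ;  /* inspect evt->u.error for evt->req_id */
        return 0;
    }

    static struct cam_req_mgr_kmd_ops my_dev_ops = {
        .get_dev_info = my_dev_get_dev_info,  /* pre-existing handlers */
        .link_setup   = my_dev_link_setup,
        .apply_req    = my_dev_apply_req,
        .flush_req    = my_dev_flush_req,
        .process_evt  = my_dev_process_evt,
    };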
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
new file mode 100644
index 0000000..9da445d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_timer.h"
+
+void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
+{
+	if (!crm_timer)
+		return;
+	CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+		crm_timer->expires, jiffies);
+	mod_timer(&crm_timer->sys_timer,
+		(jiffies + msecs_to_jiffies(crm_timer->expires)));
+}
+
+void crm_timer_callback(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
+	CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+	crm_timer_reset(timer);
+}
+
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires)
+{
+	CRM_DBG("new time %d", expires);
+	if (crm_timer) {
+		crm_timer->expires = expires;
+		crm_timer_reset(crm_timer);
+	}
+}
+
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long))
+{
+	int                       ret = 0;
+	struct cam_req_mgr_timer *crm_timer = NULL;
+
+	CRM_DBG("init timer %d %pK", expires, *timer);
+	if (*timer == NULL) {
+		crm_timer = (struct cam_req_mgr_timer *)
+			kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
+		if (!crm_timer) {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		if (timer_cb != NULL)
+			crm_timer->timer_cb = timer_cb;
+		else
+			crm_timer->timer_cb = crm_timer_callback;
+
+		crm_timer->expires = expires;
+		crm_timer->parent = parent;
+		setup_timer(&crm_timer->sys_timer,
+			crm_timer->timer_cb, (unsigned long)crm_timer);
+		crm_timer_reset(crm_timer);
+		*timer = crm_timer;
+	} else {
+		CRM_WARN("Timer already exists!!");
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+
+void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
+{
+	CRM_DBG("destroy timer %pK", *crm_timer);
+	if (*crm_timer) {
+		del_timer(&(*crm_timer)->sys_timer);
+		kfree(*crm_timer);
+		*crm_timer = NULL;
+	}
+}
+
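The intended use, going by the watchdog member added to cam_req_mgr_core_link, is create-on-start, kick-on-SOF, destroy-on-stop. A sketch under those assumptions (the callback name is a placeholder; note a custom callback bypasses the default self-rearming crm_timer_callback):

    static void my_sof_freeze_cb(unsigned long data)
    {
        struct cam_req_mgr_timer *timer =
            (struct cam_req_mgr_timer *)data;
        struct cam_req_mgr_core_link *link = timer->parent;

        /* no SOF within CAM_REQ_MGR_WATCHDOG_TIMEOUT ms: recover link */
    }

    /* stream on: allocate and arm */
    rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
        link, my_sof_freeze_cb);

    /* every SOF: push the deadline out again */
    crm_timer_reset(link->watchdog);

    /* stream off: delete, free, and NULL the pointer */
    crm_timer_exit(&link->watchdog);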
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
new file mode 100644
index 0000000..4d600ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_TIMER_H_
+#define _CAM_REQ_MGR_TIMER_H_
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core_defs.h"
+
+/** struct cam_req_mgr_timer
+ * @expires   : timeout value for timer
+ * @sys_timer : system timer variable
+ * @parent    : priv data - link pointer
+ * @timer_cb  : callback func which will be called when timeout expires
+ */
+struct cam_req_mgr_timer {
+	int32_t             expires;
+	struct timer_list   sys_timer;
+	void               *parent;
+	void              (*timer_cb)(unsigned long data);
+};
+
+/**
+ * crm_timer_modify()
+ * @brief     : allows user to modify the expiry time.
+ * @crm_timer : timer whose expiry value will be updated
+ * @expires   : new timeout value in ms
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires);
+
+/**
+ * crm_timer_reset()
+ * @brief : re-arms the timer to fire after its expires value
+ * @timer : timer which will be restarted
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *timer);
+
+/**
+ * crm_timer_init()
+ * @brief    : create a new general purpose timer.
+ *             timer utility takes care of allocating memory and deleting
+ * @timer    : double pointer to new timer allocated
+ * @expires  : Timeout value to fire callback
+ * @parent   : void pointer which caller can use for book keeping
+ * @timer_cb : caller can chose to use its own callback function when
+ *             timer fires the timeout. If no value is set timer util
+ *             will use default.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long));
+
+/**
+ * crm_timer_exit()
+ * @brief : destroys the timer allocated.
+ * @timer : timer pointer which will be freed
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **timer);
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 1f6a97a..f53e41c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,7 +12,7 @@
 
 #include "cam_req_mgr_workq.h"
 
-/* workqueue's task manager methods */
+
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
@@ -21,7 +21,7 @@
 	if (!workq)
 		return NULL;
 
-	spin_lock(&workq->task.lock);
+	spin_lock_bh(&workq->lock_bh);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -31,9 +31,9 @@
 		atomic_sub(1, &workq->task.free_cnt);
 		list_del_init(&task->entry);
 	}
-end:
-	spin_unlock(&workq->task.lock);
 
+end:
+	spin_unlock_bh(&workq->lock_bh);
 	return task;
 }
 
@@ -42,17 +42,20 @@
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
 
+	spin_lock_bh(&workq->lock_bh);
+	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
 	task->priv = NULL;
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
+	spin_unlock_bh(&workq->lock_bh);
 }
 
 /**
  * cam_req_mgr_process_task() - Process the enqueued task
- * @task: pointer to task worker thread shall process
+ * @task: pointer to task workq thread shall process
  */
 static int cam_req_mgr_process_task(struct crm_workq_task *task)
 {
@@ -62,31 +65,10 @@
 		return -EINVAL;
 
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
-
-	switch (task->type) {
-	case CRM_WORKQ_TASK_SCHED_REQ:
-	case CRM_WORKQ_TASK_DEV_ADD_REQ:
-	case CRM_WORKQ_TASK_NOTIFY_SOF:
-	case CRM_WORKQ_TASK_NOTIFY_ACK:
-	case CRM_WORKQ_TASK_NOTIFY_ERR:
-		if (task->process_cb)
-			task->process_cb(task->priv, &task->u);
-		else
-			CRM_WARN("FATAL:no task handler registered for workq!");
-		break;
-	case CRM_WORKQ_TASK_GET_DEV_INFO:
-	case CRM_WORKQ_TASK_SETUP_LINK:
-	case CRM_WORKQ_TASK_APPLY_REQ:
-		/* These tasks are not expected to be queued to
-		 * workque at the present
-		 */
-		CRM_DBG("Not supported");
-		break;
-	case CRM_WORKQ_TASK_INVALID:
-	default:
-		CRM_ERR("Invalid task type %x", task->type);
-		break;
-	}
+	if (task->process_cb)
+		task->process_cb(task->priv, task->payload);
+	else
+		CRM_WARN("FATAL:no task handler registered for workq");
 	cam_req_mgr_workq_put_task(task);
 
 	return 0;
@@ -99,8 +81,8 @@
 static void cam_req_mgr_process_workq(struct work_struct *w)
 {
 	struct cam_req_mgr_core_workq *workq = NULL;
-	struct crm_workq_task *task, *task_save;
-
+	struct crm_workq_task         *task, *task_save;
+	int32_t                        i = CRM_TASK_PRIORITY_0;
+
 	if (!w) {
 		CRM_ERR("NULL task pointer can not schedule");
 		return;
@@ -108,19 +90,44 @@
 	workq = (struct cam_req_mgr_core_workq *)
 		container_of(w, struct cam_req_mgr_core_workq, work);
 
-	list_for_each_entry_safe(task, task_save,
-		&workq->task.process_head, entry) {
-		atomic_sub(1, &workq->task.pending_cnt);
-		spin_lock(&workq->task.lock);
-		list_del_init(&task->entry);
-		spin_unlock(&workq->task.lock);
-		cam_req_mgr_process_task(task);
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				atomic_sub(1, &workq->task.pending_cnt);
+				cam_req_mgr_process_task(task);
+			}
+			CRM_DBG("processed task %pK free_cnt %d",
+				task, atomic_read(&workq->task.free_cnt));
+		}
+		i++;
 	}
-	CRM_DBG("processed task %p free_cnt %d",
-		task, atomic_read(&workq->task.free_cnt));
 }
 
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task)
+void crm_workq_clear_q(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                 i = CRM_TASK_PRIORITY_0;
+	struct crm_workq_task  *task, *task_save;
+
+	CRM_DBG("pending_cnt %d",
+		atomic_read(&workq->task.pending_cnt));
+
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				cam_req_mgr_workq_put_task(task);
+				CRM_WARN("flush task %pK, %d, cnt %d",
+					task, i, atomic_read(
+					&workq->task.free_cnt));
+			}
+		}
+		i++;
+	}
+}
+
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio)
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
@@ -132,31 +139,33 @@
 	}
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
 	if (!workq) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
+		CRM_DBG("NULL workq pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 	if (!workq->job) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 
+	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock(&workq->task.lock);
+		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
-	spin_lock(&workq->task.lock);
+	task->priv = priv;
+	task->priority =
+		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
+		? prio : CRM_TASK_PRIORITY_0;
 	list_add_tail(&task->entry,
-		&workq->task.process_head);
-	spin_unlock(&workq->task.lock);
+		&workq->task.process_head[task->priority]);
 	atomic_add(1, &workq->task.pending_cnt);
-	CRM_DBG("enq task %p pending_cnt %d",
+	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-
+	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
@@ -164,7 +173,8 @@
 	return rc;
 }
 
-int cam_req_mgr_workq_create(char *name, struct cam_req_mgr_core_workq **workq)
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -189,20 +199,35 @@
 
 		/* Workq attributes initialization */
 		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
+		spin_lock_init(&crm_workq->lock_bh);
+		CRM_DBG("LOCK_DBG workq %s lock %pK",
+			name, &crm_workq->lock_bh);
 
 		/* Task attributes initialization */
-		spin_lock_init(&crm_workq->task.lock);
 		atomic_set(&crm_workq->task.pending_cnt, 0);
 		atomic_set(&crm_workq->task.free_cnt, 0);
-		INIT_LIST_HEAD(&crm_workq->task.process_head);
+		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
+			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
-		memset(crm_workq->task.pool, 0,
-			sizeof(struct crm_workq_task) *
-			CRM_WORKQ_NUM_TASKS);
-		for (i = 0; i < CRM_WORKQ_NUM_TASKS; i++) {
+		crm_workq->task.num_task = num_tasks;
+		crm_workq->task.pool = (struct crm_workq_task *)
+			kzalloc(sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task,
+				GFP_KERNEL);
+		if (!crm_workq->task.pool) {
+			CRM_WARN("Insufficient memory %lu",
+				sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task);
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < crm_workq->task.num_task; i++) {
 			task = &crm_workq->task.pool[i];
 			task->parent = (void *)crm_workq;
 			/* Put all tasks in free pool */
+			list_add_tail(&task->entry,
+			&crm_workq->task.process_head[CRM_TASK_PRIORITY_0]);
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
@@ -213,15 +238,16 @@
 	return 0;
 }
 
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *crm_workq)
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
-	CRM_DBG("destroy workque %p", crm_workq);
-	if (crm_workq) {
-		if (crm_workq->job) {
-			destroy_workqueue(crm_workq->job);
-			crm_workq->job = NULL;
+	CRM_DBG("destroy workque %pK", crm_workq);
+	if (*crm_workq) {
+		crm_workq_clear_q(*crm_workq);
+		if ((*crm_workq)->job) {
+			destroy_workqueue((*crm_workq)->job);
+			(*crm_workq)->job = NULL;
 		}
-		kfree(crm_workq);
-		crm_workq = NULL;
+		kfree(*crm_workq);
+		*crm_workq = NULL;
 	}
 }
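Pulling the reworked workq API together: the task pool is now sized at creation, the caller's private data travels with the enqueue call instead of living on the task, and destroy takes a double pointer so the caller's handle is cleared. A condensed usage sketch (my_handler and link are placeholders):

    static int32_t my_handler(void *priv, void *data)
    {
        struct cam_req_mgr_core_link *link = priv;
        struct crm_task_payload *payload = data;

        /* dispatch on payload->type ... */
        return 0;
    }

    struct cam_req_mgr_core_workq *workq = NULL;
    struct crm_workq_task *task;
    int rc;

    rc = cam_req_mgr_workq_create("crm_link_wq", CRM_WORKQ_NUM_TASKS,
        &workq);

    task = cam_req_mgr_workq_get_task(workq);
    if (task) {
        task->process_cb = my_handler;
        rc = cam_req_mgr_workq_enqueue_task(task, link,
            CRM_TASK_PRIORITY_0);
    }

    cam_req_mgr_workq_destroy(&workq);  /* workq is NULL afterwards */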
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 6b36abc..7d8ca59 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _CAM_WORKER_H_
-#define _CAM_WORKER_H_
+#ifndef _CAM_REQ_MGR_WORKQ_H_
+#define _CAM_REQ_MGR_WORKQ_H_
 
 #include<linux/kernel.h>
 #include<linux/module.h>
@@ -23,99 +23,78 @@
 
 #include "cam_req_mgr_core.h"
 
-/* Macros */
-#define CRM_WORKQ_NUM_TASKS 30
-
-/**enum crm_workq_task_type
- * @codes: to identify which type of task is present
- */
-enum crm_workq_task_type {
-	CRM_WORKQ_TASK_GET_DEV_INFO,
-	CRM_WORKQ_TASK_SETUP_LINK,
-	CRM_WORKQ_TASK_SCHED_REQ,
-	CRM_WORKQ_TASK_DEV_ADD_REQ,
-	CRM_WORKQ_TASK_APPLY_REQ,
-	CRM_WORKQ_TASK_NOTIFY_SOF,
-	CRM_WORKQ_TASK_NOTIFY_ACK,
-	CRM_WORKQ_TASK_NOTIFY_ERR,
-	CRM_WORKQ_TASK_INVALID,
+/* Task priorities, lower the number higher the priority */
+enum crm_task_priority {
+	CRM_TASK_PRIORITY_0 = 0,
+	CRM_TASK_PRIORITY_1 = 1,
+	CRM_TASK_PRIORITY_MAX = 2,
 };
 
 /** struct crm_workq_task
- * @type: type of task
- * u -
- * @csl_req: contains info of  incoming reqest from CSL to CRM
- * @dev_req: contains tracking info of available req id at device
- * @apply_req: contains info of which request is applied at device
- * @notify_sof: contains notification from IFE to CRM about SOF trigger
- * @notify_err: contains error inf happened while processing request
- * @dev_info: contains info about which device is connected with CRM
- * @link_setup: contains info about new link being setup
- * -
- * @process_cb: registered callback called by workq when task enqueued is ready
- *  for processing in workq thread context
- * @parent: workq's parent is link which is enqqueing taks to this workq
- * @entry: list head of this list entry is worker's empty_head
- * @cancel: if caller has got free task from pool but wants to abort or put
- *  back without using it
- * @priv: when task is enqueuer caller can attach cookie
+ * @priority   : caller can assign priority to task based on type.
+ * @payload    : payload type depends on the user of the task
+ * @process_cb : registered callback called by workq when task enqueued is
+ *               ready for processing in workq thread context
+ * @parent     : workq's parent is the link enqueuing tasks to this workq
+ * @entry      : list head of this list entry is worker's empty_head
+ * @cancel     : if caller has got free task from pool but wants to abort
+ *               or put back without using it
+ * @priv       : private data attached by the caller at enqueue time,
+ *               handed back in the process callback
+ * @ret        : reserved as return value for future blocking calls
  */
 struct crm_workq_task {
-	enum crm_workq_task_type type;
-	union {
-		struct cam_req_mgr_sched_request csl_req;
-		struct cam_req_mgr_add_request dev_req;
-		struct cam_req_mgr_apply_request apply_req;
-		struct cam_req_mgr_sof_notify notify_sof;
-		struct cam_req_mgr_error_notify notify_err;
-		struct cam_req_mgr_device_info dev_info;
-		struct cam_req_mgr_core_dev_link_setup link_setup;
-	} u;
-	int (*process_cb)(void *, void *);
-	void *parent;
-	struct list_head entry;
-	uint8_t cancel;
-	void *priv;
+	int32_t                  priority;
+	void                    *payload;
+	int32_t                (*process_cb)(void *, void *);
+	void                    *parent;
+	struct list_head         entry;
+	uint8_t                  cancel;
+	void                    *priv;
+	int32_t                  ret;
 };
 
-/** struct crm_core_worker
- * @work: work token used by workqueue
- * @job: workqueue internal job struct
- *task -
- * @lock: lock for task structs
- * @pending_cnt:  num of tasks pending to be processed
- * @free_cnt:  num of free/available tasks
- * @process_head: list  head of tasks pending process
- * @empty_head: list  head of available tasks which can be used
- * or acquired in order to enqueue a task to workq
- * @pool: pool  of tasks used for handling events in workq context
- *@num_task : size of tasks pool
+/** struct cam_req_mgr_core_workq
+ * @work       : work token used by workqueue
+ * @job        : workqueue internal job struct
+ * task -
+ * @lock       : lock for task structs
+ * @free_cnt   : num of free/available tasks
+ * @empty_head : list head of available tasks which can be used
+ *               or acquired in order to enqueue a task to workq
+ * @pool       : pool of tasks used for handling events in workq context
+ * @num_task   : size of tasks pool
+ * -
  */
 struct cam_req_mgr_core_workq {
-	struct work_struct work;
-	struct workqueue_struct *job;
+	struct work_struct         work;
+	struct workqueue_struct   *job;
+	spinlock_t                 lock_bh;
 
+	/* tasks */
 	struct {
-		spinlock_t lock;
-		atomic_t pending_cnt;
-		atomic_t free_cnt;
+		struct mutex           lock;
+		atomic_t               pending_cnt;
+		atomic_t               free_cnt;
 
-		struct list_head process_head;
-		struct list_head empty_head;
-		struct crm_workq_task pool[CRM_WORKQ_NUM_TASKS];
+		struct list_head       process_head[CRM_TASK_PRIORITY_MAX];
+		struct list_head       empty_head;
+		struct crm_workq_task *pool;
+		uint32_t               num_task;
 	} task;
 };
 
 /**
  * cam_req_mgr_workq_create()
- * @brief: create a workqueue
- * @name: Name of the workque to be allocated,
- * it is combination of session handle and link handle
- * @workq: Double pointer worker
+ * @brief    : create a workqueue
+ * @name     : Name of the workqueue to be allocated, it is combination
+ *             of session handle and link handle
+ * @num_tasks : number of tasks to be allocated for the workq
+ * @workq    : Double pointer worker
  * This function will allocate and create workqueue and pass
- * the worker pointer to caller.
+ * the workq pointer to caller.
  */
-int cam_req_mgr_workq_create(char *name,
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
 	struct cam_req_mgr_core_workq **workq);
 
 /**
@@ -125,15 +104,18 @@
  * this function will destroy workqueue and clean up resources
  * associated with worker such as tasks.
  */
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *workq);
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
 
 /**
  * cam_req_mgr_workq_enqueue_task()
  * @brief: Enqueue task in worker queue
- * @task: task to be processed by worker
+ * @task : task to be processed by worker
+ * @priv : clients private data
+ * @prio : task priority
  * process callback func
  */
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task);
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio);
 
 /**
  * cam_req_mgr_workq_get_task()
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index f4215b5..236e7f1 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -1227,7 +1227,6 @@
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
 		if (mapping->ion_fd == ion_fd) {
-			mapping->ref_count++;
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			return CAM_SMMU_BUFF_EXIST;
@@ -1670,7 +1669,7 @@
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		CDBG("ion_fd:%d already in the list, give same addr back",
 				 ion_fd);
-		rc = 0;
+		rc = -EALREADY;
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
@@ -1777,14 +1776,6 @@
 		goto unmap_end;
 	}
 
-	mapping_info->ref_count--;
-	if (mapping_info->ref_count > 0) {
-		CDBG("There are still %u buffer(s) with same fd %d",
-			mapping_info->ref_count, mapping_info->ion_fd);
-		rc = 0;
-		goto unmap_end;
-	}
-
 	/* Unmapping one buffer from device */
 	CDBG("SMMU: removing buffer idx = %d\n", idx);
 	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
@@ -1833,8 +1824,6 @@
 		goto put_addr_end;
 	}
 
-	mapping_info->ref_count--;
-
 put_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
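The effect of dropping ref_count: mapping the same ion_fd twice on a context bank now returns -EALREADY (with the existing paddr/len still filled in), and a single unmap removes the buffer outright. A caller that previously relied on double-mapping should treat -EALREADY as "reuse the mapping you already hold" and skip the matching extra unmap, roughly (entry-point name assumed, not taken from this patch):

    rc = cam_smmu_map_and_get_iova(handle, ion_fd, dir, &paddr, &len);
    if (rc == -EALREADY) {
        /* fd already mapped once; paddr/len point at that mapping */
        rc = 0;
        already_mapped = true;  /* remember not to unmap twice */
    }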
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index f283b9d..006cd49 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1229,7 +1229,7 @@
 	{
 		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		inst->clk_data.bitrate = ctrl->val;
 		break;
@@ -1253,7 +1253,7 @@
 
 		property_id = HAL_CONFIG_VENC_MAX_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		break;
 	}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 1a65d07..d80e7f4 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -90,7 +90,7 @@
 	done = budget - quota;
 
 	if (done < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, done);
 		wil6210_unmask_irq_rx(wil);
 		wil_dbg_txrx(wil, "NAPI RX complete\n");
 	}
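
napi_complete_done() reports how much work the poll actually did, letting the
core drive adaptive interrupt moderation and gro_flush_timeout; plain
napi_complete() throws that information away. A generic poll sketch, not
wil6210-specific (demo_rx_process and demo_unmask_rx_irq are hypothetical):

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int done = demo_rx_process(napi, budget); /* packets handled */

		if (done < budget) {
			/* quota not exhausted: stop polling, re-arm the IRQ */
			napi_complete_done(napi, done);
			demo_unmask_rx_irq(napi);
		}
		return done;
	}
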
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index fb42ef7..f5d8227 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -3546,16 +3546,32 @@
 	ipa_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
 	if (ipa_ctx->ipa_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with unlocked lock to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa_ctx->ipa_active_clients.cnt++;
+		ipa2_active_clients_log_inc(&log_info, false);
+		ipa_active_clients_unlock();
+
 		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
 		if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
 			if (msm_bus_scale_client_update_request(
 			    ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
 				WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG("clocks are gated, not setting rate\n");
+		ipa_active_clients_unlock();
 	}
-	ipa_active_clients_unlock();
 	IPADBG("Done\n");
+
 	return 0;
 }
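
Reduced to its essentials, the fix pins the clock state with an extra
reference, drops the lock across the potentially sleeping clk_set_rate() and
bus vote, then releases the reference; all names below are illustrative:

	mutex_lock(&clients_lock);
	if (active_cnt > 0) {
		active_cnt++;			/* keep clocks on across the call */
		mutex_unlock(&clients_lock);
		clk_set_rate(demo_clk, new_rate);	/* may sleep */
		demo_client_put();		/* relocks, drops active_cnt */
	} else {
		mutex_unlock(&clients_lock);	/* clocks gated; nothing to set */
	}
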
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index cd575fe..5568f8b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,6 +51,7 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_BAM_STOP_MAX_RETRY 10
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 01eea36..9a3c146 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -591,6 +591,7 @@
 {
 	int index;
 	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	int retries = 0;
 
 	mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
 
@@ -600,6 +601,7 @@
 		return -EBADF;
 	}
 
+send_cmd:
 	init_completion(&ipa_ctx->uc_ctx.uc_completion);
 
 	ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
@@ -659,6 +661,19 @@
 	}
 
 	if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+		if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR ==
+			ipa_ctx->uc_ctx.uc_status) {
+			retries++;
+			if (retries == IPA_BAM_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+			} else {
+				/* sleep for a short period to flush IPA */
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+				goto send_cmd;
+			}
+		}
+
 		IPAERR("Recevied status %u, Expected status %u\n",
 			ipa_ctx->uc_ctx.uc_status, expected_status);
 		ipa_ctx->uc_ctx.pending_cmd = -1;
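
The retry added above is the usual bounded-retry shape: on one specific
transient status, sleep briefly and re-issue the command, giving up after a
fixed number of attempts. Condensed, with hypothetical demo_* names:

	int retries = 0;

	send_cmd:
		demo_issue_command(cmd);
		if (demo_status() == DEMO_TRANSIENT_ERR &&
		    ++retries < DEMO_MAX_RETRY) {
			usleep_range(DEMO_MIN_US, DEMO_MAX_US);	/* let hw settle */
			goto send_cmd;
		}
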
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
index 3bec471..a98d602 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -441,7 +441,7 @@
 
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status -  Values that represent
  * offload related command response status to be sent to CPU.
  */
 enum ipa_hw_2_cpu_offload_cmd_resp_status {
@@ -478,6 +478,47 @@
 };
 
 /**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
  * struct IpaHwSetUpCmd  -
  *
  *
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 128674a..b7815cb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -111,47 +111,6 @@
 };
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
- * command response status to be sent to CPU.
- */
-enum ipa_hw_2_cpu_cmd_resp_status {
-	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
-	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
-	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
-	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
-	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
-	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
-	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
-	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
-	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
-	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
-	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
-	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
-	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
-	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
-	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
-	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
-	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
-};
-
-/**
  * enum ipa_hw_wdi_errors - WDI specific error types.
  * @IPA_HW_WDI_ERROR_NONE : No error persists
  * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 30f5712..862b147 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3569,16 +3569,32 @@
 	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
 	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with unlocked lock to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa3_ctx->ipa3_active_clients.cnt++;
+		ipa3_active_clients_log_inc(&log_info, false);
+		ipa3_active_clients_unlock();
+
 		if (ipa3_clk)
 			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
 		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
 			ipa3_get_bus_vote()))
 			WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG_LOW("clocks are gated, not setting rate\n");
+		ipa3_active_clients_unlock();
 	}
-	ipa3_active_clients_unlock();
 	IPADBG_LOW("Done\n");
+
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 796103f..0b8115f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -239,6 +239,8 @@
 	struct gsi_xfer_elem xfer_elem;
 	int i;
 	int aggr_active_bitmap = 0;
+	bool pipe_suspended = false;
+	struct ipa_ep_cfg_ctrl ctrl;
 
 	IPADBG("Applying reset channel with open aggregation frame WA\n");
 	ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
@@ -265,6 +267,15 @@
 	if (result)
 		return -EFAULT;
 
+	ipahal_read_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, &ctrl);
+	if (ctrl.ipa_ep_suspend) {
+		IPADBG("pipe is suspended, remove suspend\n");
+		pipe_suspended = true;
+		ctrl.ipa_ep_suspend = false;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
+
 	/* Start channel and put 1 Byte descriptor on it */
 	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
 	if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -324,6 +335,13 @@
 	 */
 	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
 
+	if (pipe_suspended) {
+		IPADBG("suspend the pipe again\n");
+		ctrl.ipa_ep_suspend = true;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
+
 	/* Restore channels properties */
 	result = ipa3_restore_channel_properties(ep, &orig_chan_props,
 		&orig_chan_scratch);
@@ -338,6 +356,12 @@
 	ipa3_stop_gsi_channel(clnt_hdl);
 	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
 start_chan_fail:
+	if (pipe_suspended) {
+		IPADBG("suspend the pipe again\n");
+		ctrl.ipa_ep_suspend = true;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
 	ipa3_restore_channel_properties(ep, &orig_chan_props,
 		&orig_chan_scratch);
 restore_props_fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 89dd274..faa47d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1055,7 +1055,7 @@
 		ipa_assert();
 		return result;
 	}
-	result = gsi_reset_channel(ep->gsi_chan_hdl);
+	result = ipa3_reset_gsi_channel(clnt_hdl);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("Failed to reset chan: %d.\n", result);
 		ipa_assert();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 78fd90b..d369e82 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -649,6 +649,21 @@
 		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
 }
 
+static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	ep_ctrl->ipa_ep_suspend =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >>
+			IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT);
+
+	ep_ctrl->ipa_ep_delay =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >>
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
+}
+
 static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1059,7 +1074,8 @@
 		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
 		0x0000080C, 0x70},
 	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
-		ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+		ipareg_construct_endp_init_ctrl_n,
+		ipareg_parse_endp_init_ctrl_n,
 		0x00000800, 0x70},
 	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = {
 		ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 5595b7b..4e9bd64 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -764,6 +764,25 @@
 	return rc;
 }
 
+static int msm_11ad_ssr_copy_ramdump(struct msm11ad_ctx *ctx)
+{
+	if (ctx->rops.ramdump && ctx->wil_handle) {
+		int rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
+					   WIGIG_RAMDUMP_SIZE);
+		if (rc) {
+			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	ctx->dump_data.version = WIGIG_DUMP_FORMAT_VER;
+	strlcpy(ctx->dump_data.name, WIGIG_SUBSYS_NAME,
+		sizeof(ctx->dump_data.name));
+
+	ctx->dump_data.magic = WIGIG_DUMP_MAGIC_VER_V1;
+	return 0;
+}
+
 static int msm_11ad_ssr_ramdump(int enable, const struct subsys_desc *subsys)
 {
 	int rc;
@@ -780,13 +799,10 @@
 	if (!enable)
 		return 0;
 
-	if (ctx->rops.ramdump && ctx->wil_handle) {
-		rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
-				       WIGIG_RAMDUMP_SIZE);
-		if (rc) {
-			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
-			return -EINVAL;
-		}
+	if (!ctx->recovery_in_progress) {
+		rc = msm_11ad_ssr_copy_ramdump(ctx);
+		if (rc)
+			return rc;
 	}
 
 	memset(&segment, 0, sizeof(segment));
@@ -798,7 +814,6 @@
 
 static void msm_11ad_ssr_crash_shutdown(const struct subsys_desc *subsys)
 {
-	int rc;
 	struct platform_device *pdev;
 	struct msm11ad_ctx *ctx;
 
@@ -810,19 +825,8 @@
 		return;
 	}
 
-	if (ctx->rops.ramdump && ctx->wil_handle) {
-		rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
-				       WIGIG_RAMDUMP_SIZE);
-		if (rc)
-			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
-		/* continue */
-	}
-
-	ctx->dump_data.version = WIGIG_DUMP_FORMAT_VER;
-	strlcpy(ctx->dump_data.name, WIGIG_SUBSYS_NAME,
-		sizeof(ctx->dump_data.name));
-
-	ctx->dump_data.magic = WIGIG_DUMP_MAGIC_VER_V1;
+	if (!ctx->recovery_in_progress)
+		(void)msm_11ad_ssr_copy_ramdump(ctx);
 }
 
 static void msm_11ad_ssr_deinit(struct msm11ad_ctx *ctx)
@@ -901,7 +905,7 @@
 static void msm_11ad_init_cpu_boost(struct msm11ad_ctx *ctx)
 {
 	unsigned int minfreq = 0, maxfreq = 0, freq;
-	int i, boost_cpu;
+	int i, boost_cpu = 0;
 
 	for_each_possible_cpu(i) {
 		freq = cpufreq_quick_get_max(i);
@@ -1321,6 +1325,7 @@
 
 	if (ctx->subsys) {
 		dev_info(ctx->dev, "SSR requested\n");
+		(void)msm_11ad_ssr_copy_ramdump(ctx);
 		ctx->recovery_in_progress = true;
 		rc = subsystem_restart_dev(ctx->subsys);
 		if (rc) {
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3f218f5..c5ab1b0 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@
  */
 static int storvsc_timeout = 180;
 
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 static struct scsi_transport_template *fc_transport_template;
 #endif
@@ -1283,6 +1281,22 @@
 	return ret;
 }
 
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+	/*
+	 * Set blist flag to permit the reading of the VPD pages even when
+	 * the target may claim SPC-2 compliance. MSFT targets currently
+	 * claim SPC-2 compliance while they implement post SPC-2 features.
+	 * With this flag we can correctly handle WRITE_SAME_16 issues.
+	 *
+	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+	 * still supports REPORT LUN.
+	 */
+	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+	return 0;
+}
+
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
@@ -1298,14 +1312,6 @@
 	sdevice->no_write_same = 1;
 
 	/*
-	 * Add blist flags to permit the reading of the VPD pages even when
-	 * the target may claim SPC-2 compliance. MSFT targets currently
-	 * claim SPC-2 compliance while they implement post SPC-2 features.
-	 * With this patch we can correctly handle WRITE_SAME_16 issues.
-	 */
-	sdevice->sdev_bflags |= msft_blist_flags;
-
-	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
 	 * if the device is a MSFT virtual device.  If the host is
 	 * WIN10 or newer, allow write_same.
@@ -1569,6 +1575,7 @@
 	.eh_host_reset_handler =	storvsc_host_reset_handler,
 	.proc_name =		"storvsc_host",
 	.eh_timed_out =		storvsc_eh_timed_out,
+	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
 	.cmd_per_lun =		255,
 	.this_id =		-1,
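
Moving the flags from .slave_configure to .slave_alloc is about ordering:
slave_alloc runs before the SCSI core scans the device, so the BLIST bits are
already in effect for the INQUIRY/REPORT LUNS phase. Minimal shape:

	static int demo_slave_alloc(struct scsi_device *sdev)
	{
		/* must be set before the scan, not after it */
		sdev->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
		return 0;
	}
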
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6e3e636..22d32d2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5621,17 +5621,15 @@
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
-	const struct pciserial_board *board;
+	struct serial_private *new;
 
 	if (!priv)
 		return;
 
-	board = priv->board;
-	kfree(priv);
-	priv = pciserial_init_ports(dev, board);
-
-	if (!IS_ERR(priv)) {
-		pci_set_drvdata(dev, priv);
+	new = pciserial_init_ports(dev, priv->board);
+	if (!IS_ERR(new)) {
+		pci_set_drvdata(dev, new);
+		kfree(priv);
 	}
 }
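
The resume fix is the build-new-then-free-old idiom: the old state is released
only once re-initialization has succeeded, so a failed pciserial_init_ports()
no longer leaves drvdata pointing at freed memory. Generic shape, with
illustrative names:

	new = demo_init_state(dev, old->params);
	if (!IS_ERR(new)) {
		demo_set_drvdata(dev, new);	/* publish the new state */
		kfree(old);			/* only now free the old one */
	}
	/* on failure: the old state stays published and valid */
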
 
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index a2a9185..51ab794 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/usb/audio.h>
 #include <linux/wait.h>
+#include <linux/pm_qos.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
@@ -268,6 +269,8 @@
 	/* number of frames sent since start_time */
 	s64				frames_sent;
 	struct audio_source_config	*config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
 };
 
 static inline struct audio_dev *func_to_audio(struct usb_function *f)
@@ -740,6 +743,10 @@
 	runtime->hw.channels_max = 2;
 
 	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
 	return 0;
 }
 
@@ -749,6 +756,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	pm_qos_remove_request(&audio->pm_qos);
+
 	audio->substream = NULL;
 	spin_unlock_irqrestore(&audio->lock, flags);
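
The pm_qos calls bracket the PCM stream's lifetime: a zero CPU-DMA-latency
request while audio flows keeps deep idle states from adding jitter, and the
request is dropped at close. Stand-alone sketch of that API:

	#include <linux/pm_qos.h>

	static struct pm_qos_request demo_qos;

	static void demo_stream_open(void)
	{
		/* hold CPU DMA latency at 0us while the stream is live */
		pm_qos_add_request(&demo_qos, PM_QOS_CPU_DMA_LATENCY, 0);
	}

	static void demo_stream_close(void)
	{
		pm_qos_remove_request(&demo_qos);
	}
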
 
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index aaa0fc2..af1bca6 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -47,6 +47,7 @@
 #define MTP_BULK_BUFFER_SIZE       16384
 #define INTR_BUFFER_SIZE           28
 #define MAX_INST_NAME_LEN          40
+#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX	0
@@ -837,7 +838,12 @@
 		if (hdr_size) {
 			/* prepend MTP data header */
 			header = (struct mtp_data_header *)req->buf;
-			header->length = __cpu_to_le32(count);
+			/*
+			 * Set the file size in the header according to
+			 * the MTP specification v1.0.
+			 */
+			header->length = (count > MTP_MAX_FILE_SIZE) ?
+				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
 			header->type = __cpu_to_le16(2); /* data packet */
 			header->command = __cpu_to_le16(dev->xfer_command);
 			header->transaction_id =
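
One detail in the hunk above: only the in-range arm passes through
__cpu_to_le32(). That happens to be safe because 0xFFFFFFFF is byte-order
invariant, but a variant converting both arms states the intent explicitly:

	header->length = (count > MTP_MAX_FILE_SIZE) ?
			__cpu_to_le32(MTP_MAX_FILE_SIZE) :	/* saturate */
			__cpu_to_le32(count);
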
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e643c7..18dc18f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -315,7 +315,32 @@
 	struct page **pages;
 	pgoff_t next_index;
 	int nr_pages = 0;
-	int ret;
+	int got = 0;
+	int ret = 0;
+
+	if (!current->journal_info) {
+		/* caller of readpages does not hold buffer and read caps
+		 * (fadvise, madvise and readahead cases) */
+		int want = CEPH_CAP_FILE_CACHE;
+		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+		if (ret < 0) {
+			dout("start_read %p, error getting cap\n", inode);
+		} else if (!(got & want)) {
+			dout("start_read %p, no cache cap\n", inode);
+			ret = 0;
+		}
+		if (ret <= 0) {
+			if (got)
+				ceph_put_cap_refs(ci, got);
+			while (!list_empty(page_list)) {
+				page = list_entry(page_list->prev,
+						  struct page, lru);
+				list_del(&page->lru);
+				put_page(page);
+			}
+			return ret;
+		}
+	}
 
 	off = (u64) page_offset(page);
 
@@ -338,15 +363,18 @@
 				    CEPH_OSD_FLAG_READ, NULL,
 				    ci->i_truncate_seq, ci->i_truncate_size,
 				    false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
 
 	/* build page vector */
 	nr_pages = calc_pages_for(0, len);
 	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!pages)
-		goto out;
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
 	for (i = 0; i < nr_pages; ++i) {
 		page = list_entry(page_list->prev, struct page, lru);
 		BUG_ON(PageLocked(page));
@@ -379,6 +407,12 @@
 	if (ret < 0)
 		goto out_pages;
 	ceph_osdc_put_request(req);
+
+	/* After adding locked pages to page cache, the inode holds cache cap.
+	 * So we can drop our cap refs. */
+	if (got)
+		ceph_put_cap_refs(ci, got);
+
 	return nr_pages;
 
 out_pages:
@@ -387,8 +421,11 @@
 		unlock_page(pages[i]);
 	}
 	ceph_put_page_vector(pages, nr_pages, false);
-out:
+out_put:
 	ceph_osdc_put_request(req);
+out:
+	if (got)
+		ceph_put_cap_refs(ci, got);
 	return ret;
 }
 
@@ -425,7 +462,6 @@
 		rc = start_read(inode, page_list, max);
 		if (rc < 0)
 			goto out;
-		BUG_ON(rc == 0);
 	}
 out:
 	ceph_fscache_readpages_cancel(inode, page_list);
@@ -1372,9 +1408,11 @@
 	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
-	    ci->i_inline_version == CEPH_INLINE_NONE)
+	    ci->i_inline_version == CEPH_INLINE_NONE) {
+		current->journal_info = vma->vm_file;
 		ret = filemap_fault(vma, vmf);
-	else
+		current->journal_info = NULL;
+	} else
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f3f2110..03951f9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2479,6 +2479,27 @@
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
 
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+{
+	int ret, err = 0;
+
+	BUG_ON(need & ~CEPH_CAP_FILE_RD);
+	BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+	ret = ceph_pool_perm_check(ci, need);
+	if (ret < 0)
+		return ret;
+
+	ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+	if (ret) {
+		if (err == -EAGAIN) {
+			ret = 0;
+		} else if (err < 0) {
+			ret = err;
+		}
+	}
+	return ret;
+}
+
 /*
  * Wait for caps, and take cap references.  If we can't get a WR cap
  * due to a small max_size, make sure we check_max_size (and possibly
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index f995e35..ca3f630 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1249,8 +1249,9 @@
 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 		     ceph_cap_string(got));
-
+		current->journal_info = filp;
 		ret = generic_file_read_iter(iocb, to);
+		current->journal_info = NULL;
 	}
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
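
The thread running through these ceph hunks: current->journal_info is borrowed
as a per-task "read caps already held" marker around generic_file_read_iter()
and filemap_fault(), so start_read() only takes caps itself on the cap-less
paths (readahead, fadvise, madvise). Condensed:

	current->journal_info = filp;	/* mark: this task holds read caps */
	ret = generic_file_read_iter(iocb, to);
	current->journal_info = NULL;

	/* ...and in start_read(): */
	if (!current->journal_info) {
		/* caller holds no caps; take a non-blocking reference */
		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD,
					CEPH_CAP_FILE_CACHE, &got);
	}
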
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3e3fa916..622d5dd 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -905,6 +905,8 @@
 
 extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			 loff_t endoff, int *got, struct page **pinned_page);
+extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+			     int need, int want, int *got);
 
 /* for counting open files by mode */
 extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 94661cf..b3830f7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -241,6 +241,7 @@
 	/* verify the message */
 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -1314,12 +1315,19 @@
 	void *callback_data;	  /* general purpose pointer for callback */
 	void *resp_buf;		/* pointer to received SMB header */
 	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command;		/* smb command code */
 	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request  */
 	bool multiEnd:1;	/* both received */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid         fid;
+	struct cifs_tcon        *tcon;
+	struct work_struct      work;
+};
+
 /*	Make code in transport.c a little cleaner by moving
 	update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1451,6 +1459,9 @@
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN		 0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e3fed92..586fdac 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1423,6 +1423,8 @@
 
 	length = discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1534,6 +1536,8 @@
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 893be07..b8015de 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -882,10 +882,19 @@
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3d38348..9730780 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -654,3 +654,47 @@
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
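
Issuing a synchronous SMB2_close() directly from the demultiplex thread would
risk blocking it on its own response, so the compensating close is deferred:
package the state, queue a work item, release everything from the worker.
Generic shape of that idiom (demo_* names are hypothetical):

	struct demo_cleanup {
		struct work_struct	work;
		u64			handle;		/* resource to release */
	};

	static void demo_cleanup_fn(struct work_struct *work)
	{
		struct demo_cleanup *c =
			container_of(work, struct demo_cleanup, work);

		demo_release(c->handle);
		kfree(c);
	}

	static void demo_defer_release(u64 handle)
	{
		struct demo_cleanup *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return;
		c->handle = handle;
		INIT_WORK(&c->work, demo_cleanup_fn);
		queue_work(system_wq, &c->work);
	}
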
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 5d456eb..007abf7 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1565,6 +1565,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1645,6 +1646,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1727,6 +1729,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1815,6 +1818,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index f2d511a..04ef6e9 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@
 			      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					   __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+						__u64 ses_id, __u32  tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -158,6 +162,9 @@
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+					struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index bc9a7b6..390b0d0 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,22 +115,68 @@
 }
 
 static struct cifs_ses *
-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-		if (ses->Suid != smb2hdr->SessionId)
+		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
-	spin_unlock(&cifs_tcp_ses_lock);
 
 	return NULL;
 }
 
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
+	return NULL;
+}
+
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
 
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
@@ -142,7 +188,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
@@ -359,7 +405,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 206a597..cc26d41 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -727,9 +727,11 @@
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
 		send_cancel(ses->server, buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			cifs_small_buf_release(buf);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b938fa7..7ec77f8 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -40,6 +40,7 @@
 	short unsigned settime_flags;	/* to show in fdinfo */
 	struct rcu_head rcu;
 	struct list_head clist;
+	spinlock_t cancel_lock;
 	bool might_cancel;
 };
 
@@ -113,7 +114,7 @@
 	rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
 	if (ctx->might_cancel) {
 		ctx->might_cancel = false;
@@ -123,6 +124,13 @@
 	}
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+	spin_lock(&ctx->cancel_lock);
+	__timerfd_remove_cancel(ctx);
+	spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
 	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
@@ -133,6 +141,7 @@
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
 	     ctx->clockid == CLOCK_REALTIME_ALARM ||
 	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
@@ -143,9 +152,10 @@
 			list_add_rcu(&ctx->clist, &cancel_list);
 			spin_unlock(&cancel_lock);
 		}
-	} else if (ctx->might_cancel) {
-		timerfd_remove_cancel(ctx);
+	} else {
+		__timerfd_remove_cancel(ctx);
 	}
+	spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -403,6 +413,7 @@
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
+	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
 	if (isalarm(ctx)) {
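
The __timerfd_remove_cancel()/timerfd_remove_cancel() split is the standard
idiom for a helper needed both standalone and from a caller that already holds
the lock (here timerfd_setup_cancel(), which must make its test-and-remove
atomic). Reduced to its shape:

	struct demo_ctx {
		spinlock_t	cancel_lock;
		bool		might_cancel;
	};

	static void __demo_remove_cancel(struct demo_ctx *ctx)
	{
		/* caller holds ctx->cancel_lock */
		ctx->might_cancel = false;
	}

	static void demo_remove_cancel(struct demo_ctx *ctx)
	{
		spin_lock(&ctx->cancel_lock);
		__demo_remove_cancel(ctx);
		spin_unlock(&ctx->cancel_lock);
	}
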
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 24dd11e..91ea077 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -55,7 +55,6 @@
 #define DISP_CC_PLL0						38
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				39
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				40
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			41
 
 #define DISP_CC_MDSS_CORE_BCR					0
 #define DISP_CC_MDSS_GCC_CLOCKS_BCR				1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 73a8c0b..e411e8e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -187,6 +187,16 @@
 #define GPLL0_OUT_MAIN						169
 #define GPLL1							170
 #define GPLL1_OUT_MAIN						171
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				172
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				173
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			174
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			175
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			176
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				177
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			178
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			179
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				180
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				181
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index a31fa20..f3c3d1d 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -27,7 +27,5 @@
 #define RPMH_RF_CLK2_A						9
 #define RPMH_RF_CLK3						10
 #define RPMH_RF_CLK3_A						11
-#define RPMH_QDSS_CLK						12
-#define RPMH_QDSS_A_CLK						13
 
 #endif
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 74034c6..44b42a6 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -356,4 +356,14 @@
 	struct drm_clip_rect roi[SDE_MAX_ROI_V1];
 };
 
+/**
+ * Define extended power modes supported by the SDE connectors.
+ */
+#define SDE_MODE_DPMS_ON	0
+#define SDE_MODE_DPMS_LP1	1
+#define SDE_MODE_DPMS_LP2	2
+#define SDE_MODE_DPMS_STANDBY	3
+#define SDE_MODE_DPMS_SUSPEND	4
+#define SDE_MODE_DPMS_OFF	5
+
 #endif /* _SDE_DRM_H_ */
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 30e0107..3c32c74 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -29,7 +29,6 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
-CONFIG_INET6_DIAG_DESTROY=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
@@ -72,7 +71,6 @@
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_TPROXY=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -173,5 +171,4 @@
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1d203e1..21a8764 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1488,14 +1488,12 @@
 	/* (Un)Install the callbacks for further cpu hotplug operations */
 	struct cpuhp_step *sp;
 
-	mutex_lock(&cpuhp_state_mutex);
 	sp = cpuhp_get_step(state);
 	sp->startup.single = startup;
 	sp->teardown.single = teardown;
 	sp->name = name;
 	sp->multi_instance = multi_instance;
 	INIT_HLIST_HEAD(&sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 }
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@@ -1565,16 +1563,13 @@
 {
 	enum cpuhp_state i;
 
-	mutex_lock(&cpuhp_state_mutex);
 	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
 		if (cpuhp_ap_states[i].name)
 			continue;
 
 		cpuhp_ap_states[i].name = "Reserved";
-		mutex_unlock(&cpuhp_state_mutex);
 		return i;
 	}
-	mutex_unlock(&cpuhp_state_mutex);
 	WARN(1, "No more dynamic states available for CPU hotplug\n");
 	return -ENOSPC;
 }
@@ -1591,6 +1586,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (!invoke || !sp->startup.multi)
 		goto add_node;
@@ -1615,11 +1611,10 @@
 	}
 add_node:
 	ret = 0;
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_add_head(node, &sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 
 err:
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 	return ret;
 }
@@ -1648,6 +1643,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	/* currently assignments for the ONLINE state are possible */
 	if (state == CPUHP_AP_ONLINE_DYN) {
@@ -1683,6 +1679,8 @@
 		}
 	}
 out:
+	mutex_unlock(&cpuhp_state_mutex);
+
 	put_online_cpus();
 	if (!ret && dyn_state)
 		return state;
@@ -1702,6 +1700,8 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
+
 	if (!invoke || !cpuhp_get_teardown_cb(state))
 		goto remove;
 	/*
@@ -1718,7 +1718,6 @@
 	}
 
 remove:
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_del(node);
 	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
@@ -1743,6 +1742,7 @@
 	BUG_ON(cpuhp_cb_check(state));
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (sp->multi_instance) {
 		WARN(!hlist_empty(&sp->list),
@@ -1768,6 +1768,7 @@
 	}
 remove:
 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
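
Net effect of the kernel/cpu.c changes: cpuhp_state_mutex is no longer taken
piecemeal inside the helpers but once per entry point, always nested inside
get_online_cpus(), so every path sees the same lock order:

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);
	/* helpers such as cpuhp_store_callbacks() now assume the mutex */
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();
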
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 104432f..dac3724 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -78,6 +78,9 @@
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
 		raw_spin_lock(&desc->lock);
 		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 0e9505f..1258b16 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -559,7 +559,8 @@
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
 	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_double_free(cache, object, shadow_byte);
+		kasan_report_double_free(cache, object,
+				__builtin_return_address(1));
 		return true;
 	}
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6..7572917 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -104,7 +104,7 @@
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,
-			s8 shadow);
+					void *ip);
 
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 073325a..35d2db8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -48,7 +48,13 @@
 	return first_bad_addr;
 }
 
-static void print_error_description(struct kasan_access_info *info)
+static bool addr_has_shadow(struct kasan_access_info *info)
+{
+	return (info->access_addr >=
+		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+}
+
+static const char *get_shadow_bug_type(struct kasan_access_info *info)
 {
 	const char *bug_type = "unknown-crash";
 	u8 *shadow_addr;
@@ -95,12 +101,39 @@
 		break;
 	}
 
-	pr_err("BUG: KASAN: %s in %pS at addr %p\n",
-		bug_type, (void *)info->ip,
-		info->access_addr);
-	pr_err("%s of size %zu by task %s/%d\n",
-		info->is_write ? "Write" : "Read",
-		info->access_size, current->comm, task_pid_nr(current));
+	return bug_type;
+}
+
+const char *get_wild_bug_type(struct kasan_access_info *info)
+{
+	const char *bug_type = "unknown-crash";
+
+	if ((unsigned long)info->access_addr < PAGE_SIZE)
+		bug_type = "null-ptr-deref";
+	else if ((unsigned long)info->access_addr < TASK_SIZE)
+		bug_type = "user-memory-access";
+	else
+		bug_type = "wild-memory-access";
+
+	return bug_type;
+}
+
+static const char *get_bug_type(struct kasan_access_info *info)
+{
+	if (addr_has_shadow(info))
+		return get_shadow_bug_type(info);
+	return get_wild_bug_type(info);
+}
+
+static void print_error_description(struct kasan_access_info *info)
+{
+	const char *bug_type = get_bug_type(info);
+
+	pr_err("BUG: KASAN: %s in %pS\n",
+		bug_type, (void *)info->ip);
+	pr_err("%s of size %zu at addr %p by task %s/%d\n",
+		info->is_write ? "Write" : "Read", info->access_size,
+		info->access_addr, current->comm, task_pid_nr(current));
 }
 
 static inline bool kernel_or_module_addr(const void *addr)
@@ -139,9 +172,9 @@
 	kasan_enable_current();
 }
 
-static void print_track(struct kasan_track *track)
+static void print_track(struct kasan_track *track, const char *prefix)
 {
-	pr_err("PID = %u\n", track->pid);
+	pr_err("%s by task %u:\n", prefix, track->pid);
 	if (track->stack) {
 		struct stack_trace trace;
 
@@ -152,59 +185,84 @@
 	}
 }
 
-static void kasan_object_err(struct kmem_cache *cache, void *object)
+static struct page *addr_to_page(const void *addr)
+{
+	if ((addr >= (void *)PAGE_OFFSET) &&
+			(addr < high_memory))
+		return virt_to_head_page(addr);
+	return NULL;
+}
+
+static void describe_object_addr(struct kmem_cache *cache, void *object,
+				const void *addr)
+{
+	unsigned long access_addr = (unsigned long)addr;
+	unsigned long object_addr = (unsigned long)object;
+	const char *rel_type;
+	int rel_bytes;
+
+	pr_err("The buggy address belongs to the object at %p\n"
+	       " which belongs to the cache %s of size %d\n",
+		object, cache->name, cache->object_size);
+
+	if (!addr)
+		return;
+
+	if (access_addr < object_addr) {
+		rel_type = "to the left";
+		rel_bytes = object_addr - access_addr;
+	} else if (access_addr >= object_addr + cache->object_size) {
+		rel_type = "to the right";
+		rel_bytes = access_addr - (object_addr + cache->object_size);
+	} else {
+		rel_type = "inside";
+		rel_bytes = access_addr - object_addr;
+	}
+
+	pr_err("The buggy address is located %d bytes %s of\n"
+	       " %d-byte region [%p, %p)\n",
+		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
+		(void *)(object_addr + cache->object_size));
+}
+
+static void describe_object(struct kmem_cache *cache, void *object,
+				const void *addr)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 
+	if (cache->flags & SLAB_KASAN) {
+		print_track(&alloc_info->alloc_track, "Allocated");
+		pr_err("\n");
+		print_track(&alloc_info->free_track, "Freed");
+		pr_err("\n");
+	}
+
+	describe_object_addr(cache, object, addr);
+}
+
+static void print_address_description(void *addr)
+{
+	struct page *page = addr_to_page(addr);
+
 	dump_stack();
-	pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
-		cache->object_size);
+	pr_err("\n");
 
-	if (!(cache->flags & SLAB_KASAN))
-		return;
+	if (page && PageSlab(page)) {
+		struct kmem_cache *cache = page->slab_cache;
+		void *object = nearest_obj(cache, page, addr);
 
-	pr_err("Allocated:\n");
-	print_track(&alloc_info->alloc_track);
-	pr_err("Freed:\n");
-	print_track(&alloc_info->free_track);
-}
+		describe_object(cache, object, addr);
+	}
 
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
-			s8 shadow)
-{
-	unsigned long flags;
+	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
+		pr_err("The buggy address belongs to the variable:\n");
+		pr_err(" %pS\n", addr);
+	}
 
-	kasan_start_report(&flags);
-	pr_err("BUG: Double free or freeing an invalid pointer\n");
-	pr_err("Unexpected shadow byte: 0x%hhX\n", shadow);
-	kasan_object_err(cache, object);
-	kasan_end_report(&flags);
-}
-
-static void print_address_description(struct kasan_access_info *info)
-{
-	const void *addr = info->access_addr;
-
-	if ((addr >= (void *)PAGE_OFFSET) &&
-		(addr < high_memory)) {
-		struct page *page = virt_to_head_page(addr);
-
-		if (PageSlab(page)) {
-			void *object;
-			struct kmem_cache *cache = page->slab_cache;
-			object = nearest_obj(cache, page,
-						(void *)info->access_addr);
-			kasan_object_err(cache, object);
-			return;
-		}
+	if (page) {
+		pr_err("The buggy address belongs to the page:\n");
 		dump_page(page, "kasan: bad access detected");
 	}
-
-	if (kernel_or_module_addr(addr)) {
-		if (!init_task_stack_addr(addr))
-			pr_err("Address belongs to variable %pS\n", addr);
-	}
-	dump_stack();
 }
 
 static bool row_is_guilty(const void *row, const void *guilty)
@@ -259,31 +317,34 @@
 	}
 }
 
+void kasan_report_double_free(struct kmem_cache *cache, void *object,
+				void *ip)
+{
+	unsigned long flags;
+
+	kasan_start_report(&flags);
+	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip);
+	pr_err("\n");
+	print_address_description(object);
+	pr_err("\n");
+	print_shadow_for_address(object);
+	kasan_end_report(&flags);
+}
+
 static void kasan_report_error(struct kasan_access_info *info)
 {
 	unsigned long flags;
-	const char *bug_type;
 
 	kasan_start_report(&flags);
 
-	if (info->access_addr <
-			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
-		if ((unsigned long)info->access_addr < PAGE_SIZE)
-			bug_type = "null-ptr-deref";
-		else if ((unsigned long)info->access_addr < TASK_SIZE)
-			bug_type = "user-memory-access";
-		else
-			bug_type = "wild-memory-access";
-		pr_err("BUG: KASAN: %s on address %p\n",
-			bug_type, info->access_addr);
-		pr_err("%s of size %zu by task %s/%d\n",
-			info->is_write ? "Write" : "Read",
-			info->access_size, current->comm,
-			task_pid_nr(current));
+	print_error_description(info);
+	pr_err("\n");
+
+	if (!addr_has_shadow(info)) {
 		dump_stack();
 	} else {
-		print_error_description(info);
-		print_address_description(info);
+		print_address_description((void *)info->access_addr);
+		pr_err("\n");
 		print_shadow_for_address(info->first_bad_addr);
 	}
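
A worked example of describe_object_addr()'s arithmetic, assuming a 64-byte
object at address 0x1000 (region [0x1000, 0x1040)):

	/*
	 * access 0x0ff8 -> "located 8 bytes to the left of 64-byte region"
	 * access 0x1010 -> "located 16 bytes inside of 64-byte region"
	 * access 0x1048 -> "located 8 bytes to the right of 64-byte region"
	 */
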
 
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 7675d11..68637c9 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -64,6 +64,11 @@
 include scripts/Makefile.host
 endif
 
+# Do not include dtbo rules unless needed
+ifneq ($(dtbo-y),)
+include scripts/Makefile.dtbo
+endif
+
 ifneq ($(KBUILD_SRC),)
 # Create output directory if not already present
 _dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
diff --git a/scripts/Makefile.dtbo b/scripts/Makefile.dtbo
new file mode 100644
index 0000000..db4a0f4
--- /dev/null
+++ b/scripts/Makefile.dtbo
@@ -0,0 +1,24 @@
+__dtbo := $(sort $(dtbo-y))
+
+dtbo-base	:= $(sort $(foreach m,$(__dtbo),$($(m)-base)))
+dtbo := $(foreach m,$(__dtbo),$(if $($(m)-base),$(m)))
+
+__dtbo     := $(addprefix $(obj)/,$(__dtbo))
+dtbo-base	:= $(addprefix $(obj)/,$(dtbo-base))
+dtbo	:= $(addprefix $(obj)/,$(dtbo))
+
+ifneq ($(DTC_OVERLAY_TEST_EXT),)
+DTC_OVERLAY_TEST = $(DTC_OVERLAY_TEST_EXT)
+quiet_cmd_dtbo_verify	= VERIFY  $@
+cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).dtb
+else
+cmd_dtbo_verify = true
+endif
+
+$(obj)/%.dtbo: $(src)/%.dts FORCE
+	$(call if_changed_dep,dtc)
+	$(call if_changed,dtbo_verify)
+
+$(call multi_depend, $(dtbo), , -base)
+
+always += $(dtbo)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index d3d3320..f156681 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -301,6 +301,12 @@
 $(obj)/%.dtb.S: $(obj)/%.dtb
 	$(call cmd,dt_S_dtb)
 
+ifneq ($(DTC_EXT),)
+DTC = $(DTC_EXT)
+else
+DTC = $(objtree)/scripts/dtc/dtc
+endif
+
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
 	$(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index 2a48022..2eb4aec 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -1,7 +1,9 @@
 # scripts/dtc makefile
 
 hostprogs-y	:= dtc
+ifeq ($(DTC_EXT),)
 always		:= $(hostprogs-y)
+endif
 
 dtc-objs	:= dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
 		   srcpos.o checks.o util.o