Merge "msm: kgsl: Update slumber sequence" into msm-4.9
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index d28f7ba..2a8d506 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -6,6 +6,13 @@
 INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
 endif
 
+TARGET_KERNEL_MAKE_ENV := $(strip $(TARGET_KERNEL_MAKE_ENV))
+ifeq ($(TARGET_KERNEL_MAKE_ENV),)
+KERNEL_MAKE_ENV :=
+else
+KERNEL_MAKE_ENV := $(TARGET_KERNEL_MAKE_ENV)
+endif
+
 TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH))
 ifeq ($(TARGET_KERNEL_ARCH),)
 KERNEL_ARCH := arm
@@ -88,8 +95,8 @@
 endif
 
 KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr
-KERNEL_MODULES_INSTALL := system
-KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules
+KERNEL_MODULES_INSTALL ?= system
+KERNEL_MODULES_OUT ?= $(PRODUCT_OUT)/$(KERNEL_MODULES_INSTALL)/lib/modules
 
 TARGET_PREBUILT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)
 
@@ -121,26 +128,26 @@
 	mkdir -p $(KERNEL_OUT)
 
 $(KERNEL_CONFIG): $(KERNEL_OUT)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
 	$(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
 			echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
 
 $(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
 	$(hide) echo "Building kernel..."
 	$(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
 	$(mv-modules)
 	$(clean-module-folder)
 
 $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
 	$(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
 			rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;\
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;\
 			if [ -d "$(KERNEL_HEADERS_INSTALL)/include/bringup_headers" ]; then \
 				cp -Rf  $(KERNEL_HEADERS_INSTALL)/include/bringup_headers/* $(KERNEL_HEADERS_INSTALL)/include/ ;\
 			fi ;\
@@ -148,20 +155,20 @@
 	$(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \
 			echo "Used a different defconfig for header generation"; \
 			rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi
 	$(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
 			echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
-			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
 
 kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
-	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
+	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
 
 kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG)
 	env KCONFIG_NOTIMESTAMP=true \
-	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
+	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
 	env KCONFIG_NOTIMESTAMP=true \
-	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) savedefconfig
+	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) savedefconfig
 	cp $(KERNEL_OUT)/defconfig $(TARGET_KERNEL_SOURCE)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG)
 
 endif
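
For context on the hooks added above: TARGET_KERNEL_MAKE_ENV and the now-overridable
KERNEL_MODULES_INSTALL / KERNEL_MODULES_OUT are expected to be supplied by the device
configuration. The sketch below is a hypothetical BoardConfig.mk fragment, not part of
this change; the variable names come from the hunk above, but the DTC_EXT assignment and
the "vendor" install location are illustrative assumptions only.

	# Hypothetical BoardConfig.mk fragment (illustrative values only)
	TARGET_KERNEL_MAKE_ENV := DTC_EXT=$(shell pwd)/prebuilts/misc/linux-x86/dtc/dtc
	# Route kernel modules to vendor instead of the default "system"
	KERNEL_MODULES_INSTALL := vendor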
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fe53218..592fcef 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -141,6 +141,20 @@
 	* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
 	  after enabling the subunit.
 
+* Optional properties for CTI:
+
+	* qcom,cti-gpio-trigin: CTI trigger input driven by a GPIO.
+
+	* qcom,cti-gpio-trigout: CTI trigger output routed to a GPIO.
+
+	* pinctrl-names: names corresponding to the numbered pinctrl. The
+	  allowed names are a subset of the following: cti-trigin-pinctrl,
+	  cti-trigout-pctrl.
+
+	* pinctrl-<n>: list of pinctrl phandles for the different pinctrl
+	  states. Refer to
+	  "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
 * Required property for Remote ETMs:
 
 	* qcom,inst-id: must be present. QMI instance id for remote ETMs.
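
A minimal sketch of how the optional CTI properties documented above could look in a
CTI device node. Only the property names are taken from the binding text; the node name,
trigger numbers and pinctrl phandles below are hypothetical.

	cti_gpio: cti@7b80000 {
		/* ...existing required CTI properties... */
		qcom,cti-gpio-trigin = <3>;
		qcom,cti-gpio-trigout = <2>;
		pinctrl-names = "cti-trigin-pinctrl", "cti-trigout-pctrl";
		pinctrl-0 = <&cti_trigin_active>;
		pinctrl-1 = <&cti_trigout_active>;
	};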
diff --git a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
index b989d8a..e704d70 100644
--- a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
@@ -24,10 +24,12 @@
 	Value type: <prop-encoded-array>
 	Definition: First element is the base address of shared memory
 		Second element is the size of the shared memory region
+		Points to the dictionary address that houses the command DB
+		start address and the size of the command DB region
 
 Example:
 
 qcom,cmd-db@861e0000 {
 	compatible = "qcom,cmd-db";
-	reg = <0x861e0000 0x4000>;
+	reg = <0xc3f000c 0x8>;
 }
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index b381bdeb..c467327 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -10,6 +10,10 @@
 
 - compatible: "qcom,wil6210"
 - qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
+- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
+- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
+- qcom,smmu-coherent: Boolean flag indicating SMMU DMA and page table coherency
+- qcom,smmu-mapping: specifies the base address and size of SMMU space
 - qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
   the below optional properties:
@@ -33,6 +37,10 @@
 	wil6210: qcom,wil6210 {
 		compatible = "qcom,wil6210";
 		qcom,smmu-support;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
 		qcom,msm-bus,name = "wil6210";
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 06b219a..3c8a79a 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -28,6 +28,7 @@
   - qcom,use-sw-aes-ccm-algo : optional, indicates if use SW aes-ccm algorithm.
   - qcom,clk-mgmt-sus-res : optional, indicate if the ce clocks need to be disabled/enabled in suspend/resume function.
   - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,request-bw-before-clk : optional, indicates if the HW supports bandwidth requests prior to clock controls.
   - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target.Default value is 1 if not specified.
 
   - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index bc226a7..a3ef34c 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -128,6 +128,8 @@
 				feature is available or not.
 - qcom,sde-has-dim-layer:	Boolean property to indicate if mixer has dim layer
 				feature is available or not.
+- qcom,sde-has-idle-pc:		Boolean property to indicate if target has idle
+				power collapse feature available or not.
 - qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
 				feature available or not.
 - qcom,sde-has-cdp:		Boolean property to indicate if cdp feature is
@@ -420,6 +422,7 @@
     qcom,sde-csc-type = "csc-10bit";
     qcom,sde-highest-bank-bit = <15>;
     qcom,sde-has-mixer-gc;
+    qcom,sde-has-idle-pc;
     qcom,sde-sspp-max-rects = <1 1 1 1
 				1 1 1 1
 				1 1
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 3e7fcb7..b043a93 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -35,7 +35,7 @@
 - qcom,mdss-dsi-panel-destination:	A string that specifies the destination display for the panel.
 					"display_1" = DISPLAY_1
 					"display_2" = DISPLAY_2
-- qcom,mdss-dsi-panel-timings:		An array of length 12 that specifies the PHY
+- qcom,mdss-dsi-panel-phy-timings:	An array of length 12 that specifies the PHY
 					timing settings for the panel.
 - qcom,mdss-dsi-panel-timings-8996:		An array of length 40 char that specifies the 8996 PHY lane
 					timing settings for the panel.
@@ -193,6 +193,8 @@
 					"dsi_cmd_mode" = enable command mode.
 - qcom,5v-boost-gpio:			Specifies the panel gpio for display 5v boost.
 - qcom,mdss-dsi-te-check-enable:	Boolean to enable Tear Check configuration.
+- qcom,mdss-dsi-te-using-wd:		Boolean entry to enable watchdog timer support for generating the vsync signal
+					for command mode panels. By default, the panel TE is used to generate the vsync.
 - qcom,mdss-dsi-te-using-te-pin:	Boolean to specify whether using hardware vsync.
 - qcom,mdss-dsi-te-pin-select:		Specifies TE operating mode.
 					0 = TE through embedded dcs command
@@ -456,28 +458,6 @@
 					with the supply entry index. For a detailed description of
 					fields in the supply entry, refer to the qcom,ctrl-supply-entries
 					binding above.
-- qcom,config-select:			Optional property to select default configuration.
-
-[[Optional config sub-nodes]]		These subnodes provide different configurations for a given same panel.
-					Default configuration can be chosen by specifying phandle of the
-					selected subnode in the qcom,config-select.
-Required properties for sub-nodes:	None
-Optional properites:
-- qcom,lm-split:			An array of two values indicating MDP should use two layer
-					mixers to reduce power.
-					Ex: Normally 1080x1920 display uses single DSI and thus one layer
-					    mixer. But if we use two layer mixers then mux the output of
-					    those two mixers into single stream and route it to single DSI
-					    then we can lower the clock requirements of MDP. To use this
-					    configuration we need two fill this array with <540 540>.
-					Both values doesn't have to be same, but recommended, however sum of
-					both values has to be equal to the panel-width.
-					By default two mixer streams are merged using 2D mux, however if
-					2 DSC encoders are used then merge is performed within compression
-					engine.
-- qcom,split-mode:			String property indicating which split mode MDP should use. Valid
-					entries are "pingpong-split" and "dualctl-split".
-					This property is mutually exclusive with qcom,lm-split.
 - qcom,mdss-dsc-version:		An 8 bit value indicates the DSC version supported by panel. Bits[0.3]
 					provides information about minor version while Bits[4.7] provides
 					major version information. It supports only DSC rev 1(Major).1(Minor)
@@ -500,6 +480,21 @@
 - qcom,mdss-dsc-block-prediction-enable: A boolean value to enable/disable the block prediction at decoder.
 - qcom,mdss-dsc-config-by-manufacture-cmd: A boolean to indicates panel use manufacture command to setup pps
 					instead of standard dcs type 0x0A.
+- qcom,display-topology:		Array of u32 values which specifies the list of topologies available
+					for the display. A display topology is defined by a
+					set of 3 values in the order:
+					- number of mixers
+					- number of compression encoders
+					- number of interfaces
+					Therefore, the array should always contain complete 3-element tuples, one per topology.
+- qcom,default-topology-index:          A u32 value which indexes the topology set
+					specified by the node "qcom,display-topology"
+					to identify the default topology for the
+					display. The first set is indexed by the
+					value 0.
+
+Required properties for sub-nodes:	None
+Optional properties:
 - qcom,dba-panel:	Indicates whether the current panel is used as a display bridge
 					to a non-DSI interface.
 - qcom,bridge-name:			A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
@@ -575,6 +570,7 @@
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
@@ -692,18 +688,15 @@
 					29 00 00 00 00 00 02 F1 00];
 				qcom,mdss-dsi-timing-switch-command-state = "dsi_lp_mode";
 
-				qcom,config-select = <&dsi_sim_vid_config0>;
-				dsi_sim_vid_config0: config0 {
-					qcom,lm-split = <360 360>;
-					qcom,mdss-dsc-encoders = <2>;
-					qcom,mdss-dsc-slice-height = <16>;
-					qcom,mdss-dsc-slice-width = <360>;
-					qcom,mdss-dsc-slice-per-pkt = <2>;
-					qcom,mdss-dsc-bit-per-component = <8>;
-					qcom,mdss-dsc-bit-per-pixel = <8>;
-					qcom,mdss-dsc-block-prediction-enable;
-					qcom,mdss-dsc-config-by-manufacture-cmd;
-				};
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <360>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+				qcom,mdss-dsc-config-by-manufacture-cmd;
+				qcom,display-topology = <1 1 1>;
+				qcom,default-topology-index = <0>;
 			};
 		};
 		qcom,panel-supply-entries {
@@ -737,41 +730,19 @@
 			};
 		};
 
-		qcom,config-select = <&dsi_sim_vid_config0>;
 		qcom,dba-panel;
 		qcom,bridge-name = "adv7533";
 		qcom,mdss-dsc-version = <0x11>;
 		qcom,mdss-dsc-scr-version = <0x1>;
-
-		dsi_sim_vid_config0: config0 {
-			qcom,lm-split = <360 360>;
-			qcom,mdss-dsc-encoders = <2>;
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <360>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-			qcom,mdss-dsc-config-by-manufacture-cmd;
-		};
-
-		dsi_sim_vid_config1: config1 {
-			qcom,mdss-dsc-encoders = <1>;
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <360>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-			qcom,mdss-dsc-config-by-manufacture-cmd;
-		};
-
-		dsi_sim_vid_config2: config2 {
-			qcom,split-mode = "dualctl-split";
-		};
-
-		dsi_sim_vid_config3: config3 {
-			qcom,split-mode = "pingpong-split";
-		};
+		qcom,mdss-dsc-slice-height = <16>;
+		qcom,mdss-dsc-slice-width = <360>;
+		qcom,mdss-dsc-slice-per-pkt = <2>;
+		qcom,mdss-dsc-bit-per-component = <8>;
+		qcom,mdss-dsc-bit-per-pixel = <8>;
+		qcom,mdss-dsc-block-prediction-enable;
+		qcom,mdss-dsc-config-by-manufacture-cmd;
+		qcom,display-topology = <1 1 1>,
+			                <2 2 1>;
+		qcom,default-topology-index = <0>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
new file mode 100644
index 0000000..d62910a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -0,0 +1,147 @@
+* Qualcomm Technologies, Inc. MSM Camera CDM
+
+CDM (Camera Data Mover) is a module intended to provide a means for fast
+programming of camera registers and lookup tables.
+
+=======================
+Required Node Structure
+=======================
+The CDM interface node takes care of handling the HW nodes and provides an
+interface for camera clients.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cam-cdm-intf".
+
+- num-hw-cdm
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported HW blocks.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM interface.
+
+Example:
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "vfe",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+	};
+
+=======================
+Required Node Structure
+=======================
+The CDM HW node provides an interface for camera clients through
+the CDM interface node.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas-cdm".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CDM HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CDM HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CDM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CDM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM HW node.
+
+Example:
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm0";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"titan_top_ahb_clk",
+			"cam_axi_clk",
+			"camcc_slow_ahb_clk_src",
+			"cpas_top_ahb_clk",
+			"camnoc_axi_clk";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		qcom,clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
+		cdm-client-names = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
new file mode 100644
index 0000000..a61bab3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -0,0 +1,282 @@
+* Qualcomm Technologies, Inc. MSM Camera CPAS
+
+The MSM camera CPAS device provides dependency definitions for
+enabling the Camera CPAS HW and provides the client definitions
+for all HW blocks that use the CPAS driver for BW voting. These
+definitions consist of various properties that define the list
+of supported clients and the AHB and AXI master-slave IDs used
+for BW voting.
+
+=======================
+Required Node Structure
+=======================
+The camera CPAS device must be described in four levels of device nodes. The
+first level describes the overall CPAS device. Within it, second level nodes
+describe the list of AXI ports that map different clients for AXI BW voting.
+Third level nodes describe the details of each AXI port, including its name and
+mnoc, camnoc AXI bus information. Fourth level nodes describe the details of bus
+master-slave IDs and ab, ib values for the mnoc and camnoc bus interfaces.
+
+==================================
+First Level Node - CAM CPAS device
+==================================
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cpas".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas_top" or "camss_top".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CAMNOC HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CPAS HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CPAS HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CPAS HW.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- client-id-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether CPAS clients are ID based.
+
+- client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CPAS.
+
+- client-axi-port-names
+  Usage: required
+  Value type: <string>
+  Definition: AXI Port name for each client.
+
+- client-bus-camnoc-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether Clients are connected
+              through CAMNOC for AXI access.
+
+===================================================================
+Third Level Node - CAM AXI Port properties
+===================================================================
+- qcom,axi-port-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the AXI Port.
+
+===================================================================
+Fourth Level Node - CAM AXI Bus properties
+===================================================================
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- qcom,msm-bus-vector-dyn-vote
+  Usage: optional
+  Value type: <empty>
+  Definition: Bool property specifying whether this bus client
+              is dynamic vote based.
+
+Example:
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
new file mode 100644
index 0000000..f9a5e0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
@@ -0,0 +1,111 @@
+* Qualcomm Technologies, Inc. MSM Camera IFE CSID
+
+Camera IFE CSID device provides the definitions for enabling
+the IFE CSID hardware. It also provides the functions for the client
+to control the IFE CSID hardware.
+
+=======================
+Required Node Structure
+=======================
+The IFE CSID device is described in one level of the device node.
+
+======================================
+First Level Node - CAM IFE CSID device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,csid170" or "qcom,csid-lite170".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should be "csid".
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: required
+  Value type: <u32>
+  Definition: Interrupt associated with IFE CSID HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for IFE CSID HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+                "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for IFE CSID HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for IFE CSID HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+
+
+Example:
+
+	qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid170";
+		reg = <0xacb3000 0x1000>;
+		reg-names = "csid";
+		interrupts = <0 464 0>;
+		interrupt-names = "csid";
+		vdd-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>;
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 384000000>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
new file mode 100644
index 0000000..13aae64
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
@@ -0,0 +1,31 @@
+* Qualcomm Technologies, Inc. MSM Camera ISP
+
+The MSM camera ISP driver provides the definitions for enabling
+the Camera ISP hardware. It provides the functions for the client to
+control the ISP hardware.
+
+=======================
+Required Node Structure
+=======================
+The camera ISP device is described in one level of device node.
+
+==================================
+First Level Node - CAM ISP device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-isp".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "vfe" or "ife".
+
+Example:
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
new file mode 100644
index 0000000..2ed913c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -0,0 +1,130 @@
+* Qualcomm Technologies, Inc. MSM Camera SMMU
+
+The MSM camera SMMU device provides SMMU context bank definitions
+for all HW blocks that need to map IOVA to physical memory. These
+definitions consist of various properties that define how the
+IOVA address space is laid out for each HW block in the camera
+subsystem.
+
+=======================
+Required Node Structure
+=======================
+The camera SMMU device must be described in three levels of device nodes. The
+first level describes the overall SMMU device. Within it, second level nodes
+describe individual context banks that map different stream ids. There can
+also be second level nodes describing firmware device nodes. Each HW block
+such as IFE, ICP maps into these second level device nodes. All context bank
+specific properties that define how the IOVA is laid out are contained within
+third level device nodes within the second level device nodes.
+
+During the kernel initialization all the devices are probed recursively and
+a device pointer is created for each context bank keeping track of the IOVA
+mapping information.
+
+Duplicate regions of the same type are not allowed within the same
+context bank. All context banks must contain an IO region at the very least.
+
+==================================
+First Level Node - CAM SMMU device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu".
+
+===================================================================
+Second Level Node - CAM SMMU context bank device or firmware device
+===================================================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu-cb" or "qcom,msm-cam-smmu-fw-dev".
+
+- memory-region
+  Usage: optional
+  Value type: <phandle>
+  Definition: Should specify the phandle of the memory region for firmware
+    allocation.
+
+- iommus
+  Usage: required
+  Value type: <phandle>
+  Definition: Should specify the phandle of the iommu sid.
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the context bank.
+
+=============================================
+Third Level Node - CAM SMMU memory map device
+=============================================
+- iova-region-name
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the IOVA region.
+
+- iova-region-start
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify start IOVA for region.
+
+- iova-region-len
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify length for IOVA region.
+
+- iova-region-id
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the numerical identifier for IOVA region.
+    Allowed values are: 0x00 to 0x03
+      - Firmware region: 0x00
+      - Shared region: 0x01
+      - Scratch region: 0x02
+      - IO region: 0x03
+
+Example:
+	qcom,cam_smmu@0 {
+		compatible = "qcom,msm-cam-smmu";
+
+		msm_cam_smmu_icp {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1078>,
+				<&apps_smmu 0x1020>,
+				<&apps_smmu 0x1028>,
+				<&apps_smmu 0x1040>,
+				<&apps_smmu 0x1048>,
+				<&apps_smmu 0x1030>,
+				<&apps_smmu 0x1050>;
+			label = "icp";
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					/* Firmware region is 5MB */
+					iova-region-name = "firmware";
+					iova-region-start = <0x0>;
+					iova-region-len = <0x500000>;
+					iova-region-id = <0x0>;
+					status = "ok";
+				};
+
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					status = "ok";
+				};
+
+				iova-mem-region-io {
+					/* IO region is approximately 3.5 GB */
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index ea828da..bc844de 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -16,6 +16,10 @@
 		      If "halt_base" is in same 4K pages this register then
 		      this will be defined else "halt_q6", "halt_modem",
 		      "halt_nc" is required.
+		      "pdc_sync" is the power domain register introduced in
+		      sdm845 for the subsystem power domains.
+		      If alternative reset is required, "alt_reset" maps to
+		      mss_alt_ares.
 - interrupts:         The modem watchdog interrupt
 - vdd_cx-supply:      Reference to the regulator that supplies the vdd_cx domain.
 - vdd_cx-voltage:     Voltage corner/level(max) for cx rail.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
new file mode 100644
index 0000000..ca584e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -0,0 +1,73 @@
+Qualcomm Technologies, Inc. SMB1355 Charger Specific Bindings
+
+SMB1355 slave charger is paired with QTI family of standalone chargers to
+enable a high current, low profile Li+ battery charging system.
+
+The device withstands up to 28V DC and supports a wide operating input range
+of 3.8V to 14.2V for standard 5V USB inputs as well as a wide variety of HVDCP
+Travel Adapters, and is compatible with QTI's Quick Charge technology.
+
+=======================
+Required Node Structure
+=======================
+
+SMB1355 Charger must be described in two levels of device nodes.
+
+==================================
+First Level Node - SMB1355 Charger
+==================================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,smb1355".
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of SMB's revid module. This is used
+	      to identify the SMB subtype.
+
+================================================
+Second Level Nodes - SMB1355 Charger Peripherals
+================================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======
+Example
+=======
+
+smb1355_charger: qcom,smb1355-charger {
+	compatible = "qcom,smb1355";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts = <0x10 0x1 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "chg-state-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts = <0x16 0x1 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "wdog-bark";
+	};
+};
diff --git a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
index 5bf560e..846bd22 100644
--- a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
@@ -216,6 +216,15 @@
 		    as the corresponding addresses are specified in
 		    the qcom,cpr-panic-reg-addr-list property.
 
+- qcom,cpr-reset-step-quot-loop-en
+	Usage:      optional; only meaningful for CPR4 and CPRh controllers
+	Value type: <empty>
+	Definition: Boolean value which indicates that the CPR controller should
+		    be configured to reset step_quot on each loop_en = 0
+		    transition. This configuration allows the CPR controller to
+		    first use the default step_quot and then later switch to the
+		    run-time calibrated step_quot.
+
 - qcom,saw-avs-ctrl
 	Usage:      required if "saw" registers are specified by reg and
 		    reg-names properties
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 8efa85d..0c6a9f2 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -213,6 +213,18 @@
 		    target quotient adjustment due to an ACD up recommendation.
 		    Valid values are 0 through 3.
 
+- qcom,cpr-acd-notwait-for-cl-settled
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates ACD down recommendations do not
+		    need to wait for CPR closed-loop to settle.
+
+- qcom,cpr-acd-avg-fast-update
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates CPR should issue immediate
+		    voltage updates following ACD requests.
+
 - qcom,cpr-acd-avg-enable
 	Usage:      optional
 	Value type: <empty>
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
index 04b624b..0173a3d 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
@@ -19,6 +19,7 @@
 Optional properties:
 - qcom,bus-mas: contains the bus master id needed to put in bus bandwidth votes
 		for inter-connect buses.
+- qcom,wakeup-byte: Byte to be injected into the TTY layer during the wakeup ISR.
 
 Example:
 qupv3_uart11: qcom,qup_uart@0xa88000 {
@@ -34,4 +35,5 @@
 	pinctrl-1 = <&qup_1_uart_3_sleep>;
 	interrupts = <0 355 0>;
 	qcom,bus-mas = <MASTER_BLSP_2>;
+	qcom,wakeup-byte = <0xFF>;
 };
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 702f252..28ab2dd 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -11,8 +11,7 @@
 VADC_TM node
 
 Required properties:
-- compatible : should be "qcom,qpnp-adc-tm" for thermal ADC driver.
-	     : should be "qcom,qpnp-adc-tm-hc" for thermal ADC driver using
+- compatible : should be "qcom,qpnp-adc-tm-hc" for thermal ADC driver using
 	       refreshed BTM peripheral.
 - reg : offset and length of the PMIC Aribter register map.
 - address-cells : Must be one.
@@ -156,51 +155,6 @@
 	qcom,client-adc_tm = <&pm8941_adc_tm>;
 };
 
-Example for "qcom,qpnp-adc-tm" device:
-	/* Main Node */
-	qcom,vadc@3400 {
-                        compatible = "qcom,qpnp-adc-tm";
-                        reg = <0x3400 0x100>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-                        interrupts = <0x0 0x34 0x0>,
-					<0x0 0x34 0x3>,
-					<0x0 0x34 0x4>;
-			interrupt-names = "eoc-int-en-set",
-					  "high-thr-en-set",
-					  "low-thr-en-set";
-                        qcom,adc-bit-resolution = <15>;
-                        qcom,adc-vdd-reference = <1800>;
-			qcom,adc_tm-vadc = <&pm8941_vadc>;
-
-			/* Channel Node to be registered as part of thermal sysfs */
-                        chan@b5 {
-                                label = "pa_therm1";
-				reg = <0xb5>;
-                                qcom,decimation = <0>;
-                                qcom,pre-div-channel-scaling = <0>;
-                                qcom,calibration-type = "absolute";
-                                qcom,scale-function = <2>;
-                                qcom,hw-settle-time = <0>;
-                                qcom,fast-avg-setup = <0>;
-				qcom,btm-channel-number = <0x70>;
-				qcom,thermal-node;
-                        };
-
-			/* Channel Node */
-			chan@6 {
-				label = "vbat_sns";
-				reg = <6>;
-				qcom,decimation = <0>;
-				qcom,pre-div-channel-scaling = <1>;
-				qcom,calibration-type = "absolute";
-				qcom,scale-function = <3>;
-				qcom,hw-settle-time = <0>;
-				qcom,fast-avg-setup = <0>;
-				qcom,btm-channel-number = <0x78>;
-			};
-	};
-
 Example for "qcom,qpnp-adc-tm-hc" device:
 	/* Main Node */
 	pm8998_adc_tm: vadc@3400 {
@@ -218,7 +172,7 @@
 
 			/* Channel Node to be registered as part of thermal sysfs */
                         chan@b5 {
-                                label = "pa_therm1";
+                                label = "msm_therm";
 				reg = <0xb5>;
                                 qcom,pre-div-channel-scaling = <0>;
                                 qcom,calibration-type = "absolute";
@@ -239,3 +193,21 @@
 				qcom,btm-channel-number = <0x78>;
 			};
 	};
+
+/* Example to register thermal sensor using of_thermal */
+&thermal_zones {
+	msm-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0xb5>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
index bb20644..fc8ec87 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
@@ -12,10 +12,17 @@
 - interrupts:      PMIC temperature alarm interrupt
 - label:           A string used as a descriptive name for this thermal device.
 		    This name should be 19 characters or less.
+- #thermal-sensor-cells: Must be 0. Please refer to
+			 <devicetree/bindings/thermal/thermal.txt> for more
+			 details.
 
 Required structure:
 - A qcom,qpnp-temp-alarm node must be a child of an SPMI node that has specified
    the spmi-slave-container property
+- A top level device tree node named "thermal-zones" must exist. It must
+   contain a subnode with a property named "thermal-sensors" which is assigned
+   a phandle to the qpnp-temp-alarm device node. See
+   <devicetree/bindings/thermal/thermal.txt> for more details.
 
 Optional properties:
 - qcom,channel-num:    VADC channel number associated PMIC DIE_TEMP thermistor.
@@ -38,11 +45,6 @@
 			 1 = 50 Hz
 			 2 = 25 Hz
 			 3 = 12.5 Hz
-- qcom,allow-override: Boolean which controls the ability of software to
-			override shutdowns.  If present, then software is
-			allowed to override automatic PMIC hardware stage 2 and
-			stage 3 over temperature shutdowns.  Otherwise, software
-			is not allowed to override automatic shutdown.
 - qcom,default-temp:   Specifies the default temperature in millicelcius to use
 			if no ADC channel is present to read the real time
 			temperature.
@@ -64,7 +66,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		qcom,temp-alarm@2400 {
+		pm8941_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0>;
@@ -72,6 +74,36 @@
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8941_vadc>;
+			#thermal-sensor-cells = <0>;
+		};
+	};
+};
+
+Below is an example thermal zone definition for the temperature alarm
+peripheral.
+thermal-zones {
+	pm8941_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8941_tz>;
+
+		trips {
+			pm8941-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8941-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8941-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
 		};
 	};
 };
diff --git a/Makefile b/Makefile
index 2b8f550..f834951 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 25
+SUBLEVEL = 27
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index b65930a..54b54da 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,10 +17,11 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
+#define ATOMIC_INIT(i)	{ (i) }
+
 #ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)  READ_ONCE((v)->counter)
-#define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e..aee1a77 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da..47111d5 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from  */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
index 967e71f..fc4ff37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -26,4 +26,13 @@
 &blsp1_uart2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart2_console_active>;
+	status = "ok";
+};
+
+&gdsc_usb30 {
+	compatible = "regulator-fixed";
+};
+
+&gdsc_pcie {
+	compatible = "regulator-fixed";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 0afa5a8..ca6922d 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -12,6 +12,8 @@
 
 
 #include "skeleton.dtsi"
+
+#include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
 
 / {
@@ -148,10 +150,33 @@
 		#clock-cells = <1>;
 	};
 
+	clock_rpmh: qcom,rpmhclk {
+		compatible = "qcom,dummycc";
+		clock-output-names = "rpmh_clocks";
+		#clock-cells = <1>;
+	};
+
 	blsp1_uart2: serial@831000 {
 		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 		reg = <0x831000 0x200>;
 		interrupts = <0 26 0>;
 		status = "disabled";
+		clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+			<&clock_gcc GCC_BLSP1_AHB_CLK>;
+		clock-names = "core", "iface";
+	};
+
+	gdsc_usb30: qcom,gdsc@10b004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_usb30";
+		reg = <0x0010b004 0x4>;
+		status = "ok";
+	};
+
+	gdsc_pcie: qcom,gdsc@137004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_pcie";
+		reg = <0x00137004 0x4>;
+		status = "ok";
 	};
 };
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1f6d2cc..bcef117 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -208,6 +208,7 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_THERMAL=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 5d61163..5601276 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -199,6 +199,7 @@
 CONFIG_MSM_CDC_PINCTRL=y
 CONFIG_MSM_CDC_SUPPLY=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 CONFIG_SOUND=y
 CONFIG_SND=y
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b861876..84867ba 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1105,6 +1105,11 @@
 	  Space separated list of names of dtbs to append when
 	  building a concatenated Image.gz-dtb.
 
+config BUILD_ARM64_DT_OVERLAY
+	bool "Enable DT overlay compilation support"
+	depends on OF
+	help
+	  This option enables support for DT overlay compilation.
 endmenu
 
 menu "Userspace binary formats"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe7..3d9d6f3 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -62,6 +62,9 @@
 
 	  If in doubt, say N.
 
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+	bool "Enforce strict break-before-make on page table updates"
+
 source "drivers/hwtracing/coresight/Kconfig"
 
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 13a64c9..1570602 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -103,6 +103,10 @@
 
 KBUILD_DTBS	:= dtbs
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+export DTC_FLAGS := -@
+endif
+
 all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
 
 boot := arch/arm64/boot
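
For reference, the -@ flag asks dtc to emit __symbols__ information so that labels in
the base dtb remain resolvable by overlays. A minimal, hypothetical overlay fragment of
the kind this enables is sketched below; the target label is a placeholder and is not
taken from this change.

	/dts-v1/;
	/plugin/;

	/ {
		fragment@0 {
			/* &some_label resolves against the __symbols__ emitted by dtc -@ */
			target = <&some_label>;
			__overlay__ {
				status = "ok";
			};
		};
	};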
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c32324f..ff2cc3e 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -11,7 +11,8 @@
 	sdm845-v2-cdp.dtb \
 	sdm845-qrd.dtb \
 	sdm845-4k-panel-mtp.dtb \
-	sdm845-4k-panel-cdp.dtb
+	sdm845-4k-panel-cdp.dtb \
+	sdm845-4k-panel-qrd.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index c6dfc8d..c52c18b 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -221,41 +221,12 @@
 		qcom,mdss-dsi-te-using-te-pin;
 
 		qcom,compression-mode = "dsc";
-		qcom,config-select = <&dsi_nt35597_truly_dsc_cmd_config0>;
+		qcom,mdss-dsc-slice-height = <16>;
+		qcom,mdss-dsc-slice-width = <720>;
+		qcom,mdss-dsc-slice-per-pkt = <2>;
+		qcom,mdss-dsc-bit-per-component = <8>;
+		qcom,mdss-dsc-bit-per-pixel = <8>;
+		qcom,mdss-dsc-block-prediction-enable;
 
-		dsi_nt35597_truly_dsc_cmd_config0: config0 {
-			qcom,mdss-dsc-encoders = <1>;
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
-
-		dsi_nt35597_truly_dsc_cmd_config1: config1 {
-			qcom,lm-split = <720 720>;
-			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
-
-		dsi_nt35597_truly_dsc_cmd_config2: config2 {
-			qcom,lm-split = <720 720>;
-			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
index 334120a..fe9129c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -208,41 +208,11 @@
 		qcom,mdss-pan-physical-height-dimension = <131>;
 
 		qcom,compression-mode = "dsc";
-		qcom,config-select = <&dsi_nt35597_truly_dsc_video_config0>;
-
-		dsi_nt35597_truly_dsc_video_config0: config0 {
-			qcom,mdss-dsc-encoders = <1>;
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
-
-		dsi_nt35597_truly_dsc_video_config1: config1 {
-			qcom,lm-split = <720 720>;
-			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
-
-		dsi_nt35597_truly_dsc_video_config2: config2 {
-			qcom,lm-split = <720 720>;
-			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
-			qcom,mdss-dsc-slice-height = <16>;
-			qcom,mdss-dsc-slice-width = <720>;
-			qcom,mdss-dsc-slice-per-pkt = <2>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
+		qcom,mdss-dsc-slice-height = <16>;
+		qcom,mdss-dsc-slice-width = <720>;
+		qcom,mdss-dsc-slice-per-pkt = <2>;
+		qcom,mdss-dsc-bit-per-component = <8>;
+		qcom,mdss-dsc-bit-per-pixel = <8>;
+		qcom,mdss-dsc-block-prediction-enable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 25c949c..061f1d9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -77,17 +77,11 @@
 				 05 01 00 00 78 00 02 10 00];
 
 		qcom,compression-mode = "dsc";
-		qcom,config-select = <&dsi_sharp_dsc_cmd_config0>;
-
-		dsi_sharp_dsc_cmd_config0: config0 {
-			qcom,mdss-dsc-encoders = <1>;
-			qcom,mdss-dsc-slice-height = <32>;
-			qcom,mdss-dsc-slice-width = <1080>;
-			qcom,mdss-dsc-slice-per-pkt = <1>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
+		qcom,mdss-dsc-slice-height = <32>;
+		qcom,mdss-dsc-slice-width = <1080>;
+		qcom,mdss-dsc-slice-per-pkt = <1>;
+		qcom,mdss-dsc-bit-per-component = <8>;
+		qcom,mdss-dsc-bit-per-pixel = <8>;
+		qcom,mdss-dsc-block-prediction-enable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index cc093d6..e43da55 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -70,17 +70,11 @@
 				 05 01 00 00 78 00 02 10 00];
 
 		qcom,compression-mode = "dsc";
-		qcom,config-select = <&dsi_sharp_dsc_video_config0>;
-
-		dsi_sharp_dsc_video_config0: config0 {
-			qcom,mdss-dsc-encoders = <1>;
-			qcom,mdss-dsc-slice-height = <32>;
-			qcom,mdss-dsc-slice-width = <1080>;
-			qcom,mdss-dsc-slice-per-pkt = <1>;
-
-			qcom,mdss-dsc-bit-per-component = <8>;
-			qcom,mdss-dsc-bit-per-pixel = <8>;
-			qcom,mdss-dsc-block-prediction-enable;
-		};
+		qcom,mdss-dsc-slice-height = <32>;
+		qcom,mdss-dsc-slice-width = <1080>;
+		qcom,mdss-dsc-slice-per-pkt = <1>;
+		qcom,mdss-dsc-bit-per-component = <8>;
+		qcom,mdss-dsc-bit-per-pixel = <8>;
+		qcom,mdss-dsc-block-prediction-enable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 241aa71..1f08294 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -58,6 +58,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-panel-hdr-enabled;
 		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 509547f..36f36fb 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -55,6 +55,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
 			05 01 00 00 0a 00 01 00
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index f1501fa..6a3e8b4 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -61,7 +61,7 @@
 		reg = <0x15000000 0x80000>,
 			<0x150c2000 0x20>;
 		reg-names = "base", "tcu-base";
-		#iommu-cells = <1>;
+		#iommu-cells = <2>;
 		qcom,skip-init;
 		#global-interrupts = <1>;
 		#size-cells = <1>;
@@ -145,7 +145,6 @@
 			<0 1000>;
 
 		anoc_1_tbu: anoc_1_tbu@0x150c5000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150c5000 0x1000>,
 				<0x150c2200 0x8>;
@@ -167,7 +166,6 @@
 		};
 
 		anoc_2_tbu: anoc_2_tbu@0x150c9000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150c9000 0x1000>,
 				<0x150c2208 0x8>;
@@ -189,7 +187,6 @@
 		};
 
 		mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x150cd000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150cd000 0x1000>,
 				<0x150c2210 0x8>;
@@ -211,7 +208,6 @@
 		};
 
 		mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x150d1000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150d1000 0x1000>,
 				<0x150c2218 0x8>;
@@ -233,7 +229,6 @@
 		};
 
 		mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x150d5000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150d5000 0x1000>,
 				<0x150c2220 0x8>;
@@ -255,7 +250,6 @@
 		};
 
 		compute_dsp_tbu: compute_dsp_tbu@0x150d9000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150d9000 0x1000>,
 				<0x150c2228 0x8>;
@@ -276,7 +270,6 @@
 		};
 
 		adsp_tbu: adsp_tbu@0x150dd000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150dd000 0x1000>,
 				<0x150c2230 0x8>;
@@ -298,7 +291,6 @@
 		};
 
 		anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x150e1000 {
-			status = "disabled";
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x150e1000 0x1000>,
 				<0x150c2238 0x8>;
@@ -336,9 +328,9 @@
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
-		 * This SID belongs to PCIE. We can't use a fake SID for
+		 * This SID belongs to QUP1-GSI. We can't use a fake SID for
 		 * the apps_smmu device.
 		 */
-		iommus = <&apps_smmu 0x1c03>;
+		iommus = <&apps_smmu 0x16 0>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 4036ce5..655f447 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -362,7 +362,7 @@
 		compatible = "qcom,msm-audio-ion";
 		qcom,smmu-version = <2>;
 		qcom,smmu-enabled;
-		iommus = <&apps_smmu 0x1821>;
+		iommus = <&apps_smmu 0x1821 0x0>;
 	};
 
 	qcom,msm-adsp-loader {
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 15db8da..b9a6c79 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -62,11 +62,14 @@
 			};
 		};
 
-		qcom,temp-alarm@2400 {
+		pm8998_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pm8998_tz";
+			qcom,channel-num = <6>;
+			qcom,temp_alarm-vadc = <&pm8998_vadc>;
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8998_gpios: pinctrl@c000 {
@@ -197,3 +200,30 @@
 		#size-cells = <0>;
 	};
 };
+
+&thermal_zones {
+	pm8998_temp_alarm: pm8998_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8998_tz>;
+
+		trips {
+			pm8998_trip0: pm8998-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8998_trip1: pm8998-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8998_trip2: pm8998-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 923804f..b53f7ac 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -31,11 +31,12 @@
 			reg = <0x800 0x100>;
 		};
 
-		qcom,temp-alarm@2400 {
+		pmi8998_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pmi8998_tz";
+			#thermal-sensor-cells = <0>;
 		};
 
 		pmi8998_gpios: pinctrl@c000 {
@@ -796,4 +797,28 @@
 			};
 		};
 	};
+
+	pmi8998_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmi8998_tz>;
+
+		trips {
+			pmi8998_trip0: pmi8998-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pmi8998_trip1: pmi8998-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pmi8998_trip2: pmi8998-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index d5646bf..122299c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -21,3 +21,26 @@
 	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
 	qcom,board-id = <1 1>;
 };
+
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index d641276..55e615c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -21,3 +21,26 @@
 	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
 	qcom,board-id = <8 1>;
 };
+
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
new file mode 100644
index 0000000..6171c7b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 9d59a16..fcc09a0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -17,7 +17,7 @@
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
 
 &msm_audio_ion {
-	iommus = <&apps_smmu 0x1821>;
+	iommus = <&apps_smmu 0x1821 0x0>;
 	qcom,smmu-sid-mask = /bits/ 64 <0xf>;
 };
 
@@ -34,6 +34,7 @@
 	sound-tavil {
 		compatible = "qcom,sdm845-asoc-snd-tavil";
 		qcom,model = "sdm845-tavil-snd-card";
+		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
 		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index c0189a4..922e990 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -118,10 +118,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -157,10 +159,12 @@
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000>;
-		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
@@ -193,10 +197,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -240,10 +246,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -280,10 +288,12 @@
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000>;
-		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
@@ -322,10 +332,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index c0189a4..922e990 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -118,10 +118,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -157,10 +159,12 @@
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000>;
-		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
@@ -193,10 +197,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -240,10 +246,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_rear_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -280,10 +288,12 @@
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
-		qcom,cam-vreg-min-voltage = <1050000 0 3312000>;
-		qcom,cam-vreg-max-voltage = <1050000 0 3600000>;
-		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+		qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
@@ -322,10 +332,12 @@
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
-		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
-		qcom,cam-vreg-min-voltage = <0 3312000 1050000>;
-		qcom,cam-vreg-max-voltage = <0 3600000 1050000>;
-		qcom,cam-vreg-op-mode = <0 80000 105000>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+		qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+		qcom,cam-vreg-op-mode = <0 80000 105000 0>;
 		qcom,gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cd9c8a8..9ead234 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -71,7 +71,7 @@
 			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
 			<&clock_camcc CAM_CC_CSIPHY1_CLK>,
 			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
-			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
+			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>;
 		clock-names = "camnoc_axi_clk",
@@ -225,4 +225,277 @@
 			status = "ok";
 		};
 	};
+
+	qcom,cam_smmu {
+		compatible = "qcom,msm-cam-smmu";
+		status = "ok";
+
+		msm_cam_smmu_ife {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x808>,
+				<&apps_smmu 0x810>,
+				<&apps_smmu 0x818>,
+				<&apps_smmu 0xc08>,
+				<&apps_smmu 0xc10>,
+				<&apps_smmu 0xc18>;
+			label = "ife";
+			ife_iova_mem_map: iova-mem-map {
+				/* IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_icp_fw {
+			compatible = "qcom,msm-cam-smmu-fw-dev";
+			label = "icp";
+			memory-region = <&pil_camera_mem>;
+		};
+
+		msm_cam_smmu_icp {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1078>,
+				<&apps_smmu 0x1020>,
+				<&apps_smmu 0x1028>,
+				<&apps_smmu 0x1040>,
+				<&apps_smmu 0x1048>,
+				<&apps_smmu 0x1030>,
+				<&apps_smmu 0x1050>;
+			label = "icp";
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					/* Firmware region is 5MB */
+					iova-region-name = "firmware";
+					iova-region-start = <0x0>;
+					iova-region-len = <0x500000>;
+					iova-region-id = <0x0>;
+					status = "ok";
+				};
+
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					status = "ok";
+				};
+
+				iova-mem-region-io {
+					/* IO region is approximately 3.3 GB */
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_smmu_cpas_cdm {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1000>;
+			label = "cpas-cdm0";
+			cpas_cdm_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					/* IO region is approximately 3.4 GB */
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_smmu_secure {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1001>;
+			label = "cam-secure";
+			cam_secure_iova_mem_map: iova-mem-map {
+				/* Secure IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+	};
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"csiphy0", "csiphy1", "csiphy2", "cci0",
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_2_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
+
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		cell-index = <0>;
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "ife",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+		status = "ok";
+	};
+
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		reg-cam-base = <0x48000>;
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_camera_ahb",
+			"gcc_camera_axi",
+			"cam_cc_soc_ahb_clk",
+			"cam_cc_cpas_ahb_clk",
+			"cam_cc_camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0>;
+		cdm-client-names = "vfe";
+		status = "ok";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 68824d7..2c9c012 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -93,6 +93,10 @@
 	};
 };
 
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -202,6 +206,7 @@
 		spi0 = &qupv3_se8_spi;
 		i2c0 = &qupv3_se10_i2c;
 		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
 	};
 };
 
@@ -224,6 +229,30 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
@@ -245,6 +274,10 @@
 	status = "ok";
 };
 
+&qupv3_se6_4uart {
+	status = "ok";
+};
+
 &usb1 {
 	status = "okay";
 	extcon = <&extcon_usb1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 434de76..f6493ac 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -380,6 +380,31 @@
 
 	};
 
+	hwevent: hwevent@0x014066f0 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x14066f0 0x4>,
+		      <0x14166f0 0x4>,
+		      <0x1406038 0x4>,
+		      <0x1416038 0x4>;
+		reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl",
+			    "ddr-ch23-ctrl";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
 	funnel_in0: funnel@0x6041000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
@@ -457,6 +482,16 @@
 			};
 
 			port@1 {
+				reg = <0>;
+				funnel_in2_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&modem_etm0_out_funnel_in2>;
+				};
+
+			};
+
+			port@2 {
 				reg = <1>;
 				funnel_in2_in_replicator_swao: endpoint {
 					slave-mode;
@@ -466,7 +501,7 @@
 
 			};
 
-			port@2 {
+			port@3 {
 				reg = <2>;
 				funnel_in2_in_funnel_modem: endpoint {
 					slave-mode;
@@ -476,7 +511,7 @@
 
 			};
 
-			port@3 {
+			port@4 {
 				reg = <5>;
 				funnel_in2_in_funnel_apss_merg: endpoint {
 					slave-mode;
@@ -504,6 +539,7 @@
 				     <2 32>,
 				     <3 32>,
 				     <5 32>,
+				     <6 32>,
 				     <10 32>,
 				     <11 32>,
 				     <13 32>;
@@ -563,6 +599,15 @@
 			};
 
 			port@5 {
+				reg = <6>;
+				tpda_in_funnel_turing: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_turing_out_tpda>;
+				};
+			};
+
+			port@6 {
 				reg = <7>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -571,7 +616,7 @@
 				};
 			};
 
-			port@6 {
+			port@7 {
 				reg = <10>;
 				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
@@ -580,7 +625,7 @@
 				};
 			};
 
-			port@7 {
+			port@8 {
 				reg = <11>;
 				tpda_in_tpdm_north: endpoint {
 					slave-mode;
@@ -589,7 +634,7 @@
 				};
 			};
 
-			port@8 {
+			port@9 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -733,7 +778,7 @@
 		coresight-name = "coresight-tpdm-lpass";
 
 		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "core_clk";
+		clock-names = "apb_pclk";
 
 		port {
 			tpdm_lpass_out_funnel_lpass: endpoint {
@@ -859,7 +904,7 @@
 		coresight-name = "coresight-tpda-llm-silver";
 
 		qcom,tpda-atid = <72>;
-		qcom,cmb-elem-size = <0 64>;
+		qcom,cmb-elem-size = <0 32>;
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -914,7 +959,7 @@
 		coresight-name = "coresight-tpda-llm-gold";
 
 		qcom,tpda-atid = <73>;
-		qcom,cmb-elem-size = <0 64>;
+		qcom,cmb-elem-size = <0 32>;
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1013,6 +1058,69 @@
 		};
 	};
 
+	funnel_turing: funnel@6861000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6861000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-turing";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_turing_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_turing>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_turing_in_tpdm_turing: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_turing_out_funnel_turing>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_turing_in_turing_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&turing_etm0_out_funnel_turing>;
+				};
+			};
+		};
+	};
+
+	tpdm_turing: tpdm@6860000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6860000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-turing";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_turing_out_funnel_turing: endpoint {
+				remote-endpoint =
+				    <&funnel_turing_in_tpdm_turing>;
+			};
+		};
+	};
+
 	funnel_ddr_0: funnel@69e2000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
@@ -1279,6 +1387,30 @@
 		};
 	};
 
+	cti_ddr0: cti@69e1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e1000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ddr1: cti@69e4000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e4000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
 	cti0: cti@6010000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
@@ -1315,6 +1447,10 @@
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
+
+		qcom,cti-gpio-trigout = <4>;
+		pinctrl-names = "cti-trigout-pctrl";
+		pinctrl-0 = <&trigout_a>;
 	};
 
 	cti3: cti@6013000 {
@@ -1591,6 +1727,20 @@
 		clock-names = "apb_pclk";
 	};
 
+	turing_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-turing-etm0";
+		qcom,inst-id = <1>;
+
+		port {
+			turing_etm0_out_funnel_turing: endpoint {
+				remote-endpoint =
+					<&funnel_turing_in_turing_etm0>;
+			};
+		};
+	};
+
 	dummy_eud: dummy_sink {
 		compatible = "qcom,coresight-dummy";
 
@@ -1606,6 +1756,20 @@
 		};
 	};
 
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <2>;
+
+		port {
+			modem_etm0_out_funnel_in2: endpoint {
+				remote-endpoint =
+					<&funnel_in2_in_modem_etm0>;
+			};
+		};
+	};
+
 	funnel_apss_merg: funnel@7810000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index bfbaabb..77edb85 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -12,6 +12,12 @@
 
 &soc {
 
+	pil_gpu: qcom,kgsl-hyp {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <13>;
+		qcom,firmware-name = "a630_zap";
+	};
+
 	msm_bus: qcom,kgsl-busmon{
 		label = "kgsl-busmon";
 		compatible = "qcom,kgsl-busmon";
@@ -77,13 +83,16 @@
 		#cooling-cells = <2>;
 
 		clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK>,
-			<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
 			<&clock_gpucc GPU_CC_CXO_CLK>,
 			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
-			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			<&clock_gpucc GPU_CC_CX_GMU_CLK>,
+			<&clock_gpucc GPU_CC_AHB_CLK>,
+			<&clock_gpucc GPU_CC_GX_CXO_CLK>;
 
-		clock-names = "core_clk", "iface_clk", "rbbmtimer_clk",
-			"mem_clk", "mem_iface_clk";
+		clock-names = "core_clk", "rbbmtimer_clk", "mem_clk",
+				"mem_iface_clk", "gmu_clk", "ahb_clk",
+				"cxo_clk";
 
 		qcom,isense-clk-on-level = <1>;
 
@@ -162,19 +171,19 @@
 
 			qcom,gpu-pwrlevel@0 {
 				reg = <0>;
-				qcom,gpu-freq = <548000000>;
-				qcom,bus-freq = <12>;
-				qcom,bus-min = <11>;
-				qcom,bus-max = <12>;
+				qcom,gpu-freq = <280000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
 			};
 
 
 			qcom,gpu-pwrlevel@1 {
 				reg = <1>;
-				qcom,gpu-freq = <425000000>;
-				qcom,bus-freq = <7>;
-				qcom,bus-min = <6>;
-				qcom,bus-max = <8>;
+				qcom,gpu-freq = <280000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
 			};
 
 			qcom,gpu-pwrlevel@2 {
@@ -187,10 +196,10 @@
 
 			qcom,gpu-pwrlevel@3 {
 				reg = <3>;
-				qcom,gpu-freq = <27000000>;
-				qcom,bus-freq = <0>;
-				qcom,bus-min = <0>;
-				qcom,bus-max = <0>;
+				qcom,gpu-freq = <280000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
 			};
 		};
 
@@ -248,13 +257,13 @@
 
 
 		clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>,
-				<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
 				<&clock_gpucc GPU_CC_CXO_CLK>,
 				<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
-				<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+				<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+				<&clock_gpucc GPU_CC_AHB_CLK>;
 
-		clock-names = "gmu_clk", "ahb_clk", "cxo_clk",
-				"axi_clk", "memnoc_clk";
+		clock-names = "gmu_clk", "cxo_clk", "axi_clk",
+				"memnoc_clk", "ahb_clk";
 
 		qcom,gmu-pwrlevels {
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 4391189..e4261e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -12,6 +12,7 @@
 
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm845-camera-sensor-mtp.dtsi"
+#include "smb1355.dtsi"
 
 / {
 	bluetooth: bt_wcn3990 {
@@ -93,6 +94,30 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
@@ -102,6 +127,10 @@
 	qcom,led-strings-list = [01 02];
 };
 
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -199,12 +228,17 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&smb1355_charger {
+	status = "ok";
+};
+
 / {
 aliases {
 		serial0 = &qupv3_se9_2uart;
 		spi0 = &qupv3_se8_spi;
 		i2c0 = &qupv3_se10_i2c;
 		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
 	};
 };
 
@@ -224,6 +258,10 @@
 	status = "ok";
 };
 
+&qupv3_se6_4uart {
+	status = "ok";
+};
+
 &usb1 {
 	status = "okay";
 	extcon = <&extcon_usb1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
new file mode 100644
index 0000000..da5d6fa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+&soc {
+	pcie0: qcom,pcie@0x1c00000 {
+		compatible = "qcom,pci-msm";
+		cell-index = <0>;
+
+		reg = <0x1c00000 0x2000>,
+		      <0x1c06000 0x1000>,
+		      <0x60000000 0xf1d>,
+		      <0x60000f20 0xa8>,
+		      <0x60100000 0x100000>,
+		      <0x60200000 0x100000>,
+		      <0x60300000 0xd00000>;
+
+		reg-names = "parf", "phy", "dm_core", "elbi",
+				"conf", "io", "bars";
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x01000000 0x0 0x60200000 0x60200000 0x0 0x100000>,
+			<0x02000000 0x0 0x60300000 0x60300000 0x0 0xd00000>;
+		interrupt-parent = <&pcie0>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+				20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+				36 37>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0xffffffff>;
+		interrupt-map = <0 0 0 0 &intc 0 141 0
+				0 0 0 1 &intc 0 149 0
+				0 0 0 2 &intc 0 150 0
+				0 0 0 3 &intc 0 151 0
+				0 0 0 4 &intc 0 152 0
+				0 0 0 5 &intc 0 140 0
+				0 0 0 6 &intc 0 672 0
+				0 0 0 7 &intc 0 673 0
+				0 0 0 8 &intc 0 674 0
+				0 0 0 9 &intc 0 675 0
+				0 0 0 10 &intc 0 676 0
+				0 0 0 11 &intc 0 677 0
+				0 0 0 12 &intc 0 678 0
+				0 0 0 13 &intc 0 679 0
+				0 0 0 14 &intc 0 680 0
+				0 0 0 15 &intc 0 681 0
+				0 0 0 16 &intc 0 682 0
+				0 0 0 17 &intc 0 683 0
+				0 0 0 18 &intc 0 684 0
+				0 0 0 19 &intc 0 685 0
+				0 0 0 20 &intc 0 686 0
+				0 0 0 21 &intc 0 687 0
+				0 0 0 22 &intc 0 688 0
+				0 0 0 23 &intc 0 689 0
+				0 0 0 24 &intc 0 690 0
+				0 0 0 25 &intc 0 691 0
+				0 0 0 26 &intc 0 692 0
+				0 0 0 27 &intc 0 693 0
+				0 0 0 28 &intc 0 694 0
+				0 0 0 29 &intc 0 695 0
+				0 0 0 30 &intc 0 696 0
+				0 0 0 31 &intc 0 697 0
+				0 0 0 32 &intc 0 698 0
+				0 0 0 33 &intc 0 699 0
+				0 0 0 34 &intc 0 700 0
+				0 0 0 35 &intc 0 701 0
+				0 0 0 36 &intc 0 702 0
+				0 0 0 37 &intc 0 703 0>;
+
+		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+				"int_d", "int_global_int",
+				"msi_0", "msi_1", "msi_2", "msi_3",
+				"msi_4", "msi_5", "msi_6", "msi_7",
+				"msi_8", "msi_9", "msi_10", "msi_11",
+				"msi_12", "msi_13", "msi_14", "msi_15",
+				"msi_16", "msi_17", "msi_18", "msi_19",
+				"msi_20", "msi_21", "msi_22", "msi_23",
+				"msi_24", "msi_25", "msi_26", "msi_27",
+				"msi_28", "msi_29", "msi_30", "msi_31";
+
+		qcom,phy-sequence = <0x804 0x01 0x0
+					0x034 0x14 0x0
+					0x138 0x30 0x0
+					0x048 0x07 0x0
+					0x15c 0x06 0x0
+					0x090 0x01 0x0
+					0x088 0x20 0x0
+					0x0f0 0x00 0x0
+					0x0f8 0x01 0x0
+					0x0f4 0xc9 0x0
+					0x11c 0xff 0x0
+					0x120 0x3f 0x0
+					0x164 0x01 0x0
+					0x154 0x00 0x0
+					0x148 0x0a 0x0
+					0x05c 0x19 0x0
+					0x038 0x90 0x0
+					0x0b0 0x82 0x0
+					0x0c0 0x02 0x0
+					0x0bc 0xea 0x0
+					0x0b8 0xab 0x0
+					0x0a0 0x00 0x0
+					0x09c 0x0d 0x0
+					0x098 0x04 0x0
+					0x13c 0x00 0x0
+					0x060 0x06 0x0
+					0x068 0x16 0x0
+					0x070 0x36 0x0
+					0x184 0x01 0x0
+					0x15c 0x16 0x0
+					0x138 0x33 0x0
+					0x03c 0x02 0x0
+					0x040 0x07 0x0
+					0x080 0x04 0x0
+					0x0dc 0x00 0x0
+					0x0d8 0x3f 0x0
+					0x00c 0x09 0x0
+					0x010 0x01 0x0
+					0x01c 0x40 0x0
+					0x020 0x01 0x0
+					0x014 0x02 0x0
+					0x018 0x00 0x0
+					0x024 0x7e 0x0
+					0x028 0x15 0x0
+					0x244 0x02 0x0
+					0x2a4 0x12 0x0
+					0x260 0x10 0x0
+					0x28c 0x06 0x0
+					0x504 0x03 0x0
+					0x500 0x1c 0x0
+					0x50c 0x14 0x0
+					0x4d4 0x0e 0x0
+					0x4d8 0x04 0x0
+					0x4dc 0x1a 0x0
+					0x434 0x4b 0x0
+					0x414 0x04 0x0
+					0x40c 0x04 0x0
+					0x4f8 0x71 0x0
+					0x564 0x59 0x0
+					0x568 0x59 0x0
+					0x4fc 0x80 0x0
+					0x51c 0x40 0x0
+					0x444 0x71 0x0
+					0x43c 0x40 0x0
+					0x854 0x04 0x0
+					0x62c 0x52 0x0
+					0x654 0x50 0x0
+					0x65c 0x1a 0x0
+					0x660 0x06 0x0
+					0x8c8 0x83 0x0
+					0x8cc 0x09 0x0
+					0x8d0 0xa2 0x0
+					0x8d4 0x40 0x0
+					0x8c4 0x02 0x0
+					0x9ac 0x00 0x0
+					0x8a0 0x01 0x0
+					0x9e0 0x00 0x0
+					0x9dc 0x20 0x0
+					0x9a8 0x00 0x0
+					0x8a4 0x01 0x0
+					0x8a8 0x73 0x0
+					0x9d8 0xaa 0x0
+					0x9b0 0x03 0x0
+					0xa0c 0x0d 0x0
+					0x86c 0x00 0x0
+					0x644 0x00 0x0
+					0x804 0x03 0x0
+					0x800 0x00 0x0
+					0x808 0x03 0x0>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_default>;
+
+		perst-gpio = <&tlmm 35 0>;
+		wake-gpio = <&tlmm 37 0>;
+
+		gdsc-vdd-supply = <&pcie_0_gdsc>;
+		vreg-1.8-supply = <&pm8998_l26>;
+		vreg-0.9-supply = <&pm8998_l1>;
+		vreg-cx-supply = <&pm8998_s9_level>;
+
+		qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+		qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
+		qcom,vreg-cx-voltage-level = <RPMH_REGULATOR_LEVEL_MAX
+						RPMH_REGULATOR_LEVEL_SVS 0>;
+
+		qcom,l1-supported;
+		qcom,l1ss-supported;
+		qcom,aux-clk-sync;
+
+		qcom,ep-latency = <10>;
+
+		qcom,boot-option = <0x1>;
+
+		linux,pci-domain = <0>;
+
+		qcom,msi-gicm-addr = <0x17a00040>;
+		qcom,msi-gicm-base = <0x2c0>;
+
+		qcom,pcie-phy-ver = <0x30>;
+		qcom,use-19p2mhz-aux-clk;
+
+		qcom,smmu-sid-base = <0x1c10>;
+
+		iommu-map = <0x100 &apps_smmu 0x1c11 0x1>,
+			<0x200 &apps_smmu 0x1c12 0x1>,
+			<0x300 &apps_smmu 0x1c13 0x1>,
+			<0x400 &apps_smmu 0x1c14 0x1>,
+			<0x500 &apps_smmu 0x1c15 0x1>,
+			<0x600 &apps_smmu 0x1c16 0x1>,
+			<0x700 &apps_smmu 0x1c17 0x1>,
+			<0x800 &apps_smmu 0x1c18 0x1>,
+			<0x900 &apps_smmu 0x1c19 0x1>,
+			<0xa00 &apps_smmu 0x1c1a 0x1>,
+			<0xb00 &apps_smmu 0x1c1b 0x1>,
+			<0xc00 &apps_smmu 0x1c1c 0x1>,
+			<0xd00 &apps_smmu 0x1c1d 0x1>,
+			<0xe00 &apps_smmu 0x1c1e 0x1>,
+			<0xf00 &apps_smmu 0x1c1f 0x1>;
+
+		qcom,msm-bus,name = "pcie0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<45 512 0 0>,
+				<45 512 500 800>;
+
+		clocks = <&clock_gcc GCC_PCIE_0_PIPE_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_PCIE_0_AUX_CLK>,
+			<&clock_gcc GCC_PCIE_0_CFG_AHB_CLK>,
+			<&clock_gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+			<&clock_gcc GCC_PCIE_0_SLV_AXI_CLK>,
+			<&clock_gcc GCC_PCIE_0_CLKREF_CLK>,
+			<&clock_gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+			<&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+			<&clock_gcc GCC_PCIE_PHY_REFGEN_CLK>,
+			<&clock_gcc GCC_PCIE_PHY_AUX_CLK>;
+
+		clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo", "pcie_0_slv_q2a_axi_clk",
+				"pcie_tbu_clk", "pcie_phy_refgen_clk",
+				"pcie_phy_aux_clk";
+
+		max-clock-frequency-hz = <0>, <0>, <19200000>, <0>, <0>,
+					<0>, <0>, <0>, <0>, <100000000>, <0>;
+
+		resets = <&clock_gcc GCC_PCIE_0_BCR>,
+			<&clock_gcc GCC_PCIE_0_PHY_BCR>;
+
+		reset-names = "pcie_0_core_reset",
+				"pcie_0_phy_reset";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index d6af58b..947262fb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -160,6 +160,47 @@
 			};
 		};
 
+		pcie0 {
+			pcie0_clkreq_default: pcie0_clkreq_default {
+				mux {
+					pins = "gpio36";
+					function = "pci_e0";
+				};
+
+				config {
+					pins = "gpio36";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_perst_default: pcie0_perst_default {
+				mux {
+					pins = "gpio35";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio35";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			pcie0_wake_default: pcie0_wake_default {
+				mux {
+					pins = "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio37";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
 		cdc_reset_ctrl {
 			cdc_reset_sleep: cdc_reset_sleep {
 				mux {
@@ -417,6 +458,58 @@
 			};
 		};
 
+		sde_dp_aux_active: sde_dp_aux_active {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-disable = <0>; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		sde_dp_aux_suspend: sde_dp_aux_suspend {
+			mux {
+				pins = "gpio43", "gpio51";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio43", "gpio51";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		sde_dp_usbplug_cc_active: sde_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		sde_dp_usbplug_cc_suspend: sde_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
@@ -2558,6 +2651,18 @@
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
+
+		trigout_a: trigout_a {
+			mux {
+				pins = "gpio62", "gpio51";
+				function = "qdss_cti";
+			};
+			config {
+				pins = "gpio62", "gpio51";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 1d5bf3a..1ac661d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -10,18 +10,57 @@
  * GNU General Public License for more details.
  */
 
+#include "smb1355.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
 /{
 	qrd_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
 		#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
 	};
+
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&qupv3_se6_4uart {
+	status = "ok";
 };
 
 &pmi8998_fg {
 	qcom,battery-data = <&qrd_batterydata>;
 };
 
+&smb1355_charger {
+	status = "ok";
+};
+
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
+
 &soc {
 	sound-tavil {
 		qcom,wsa-max-devs = <1>;
@@ -29,3 +68,82 @@
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
 	};
 };
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l20>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&ufsphy_card {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_card {
+	vdd-hba-supply = <&ufs_card_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm8998_l21>;
+	vccq2-supply = <&pm8998_s4>;
+	vcc-max-microamp = <300000>;
+	vccq2-max-microamp = <300000>;
+
+	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
+
+&labibb {
+	status = "ok";
+	qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index dd0d08e..e5d1a74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -30,9 +30,11 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_4uart_active>;
 		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
-		interrupts = <GIC_SPI 607 0>;
+		interrupts-extended = <&intc GIC_SPI 607 0>,
+				<&tlmm 48 0>;
 		status = "disabled";
 		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
+		qcom,wakeup-byte = <0xFD>;
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
@@ -46,9 +48,11 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_4uart_active>;
 		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
-		interrupts = <GIC_SPI 608 0>;
+		interrupts-extended = <&intc GIC_SPI 608 0>,
+				<&tlmm 96 0>;
 		status = "disabled";
 		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
+		qcom,wakeup-byte = <0xFD>;
 	};
 
 	/* I2C */
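Note: interrupts-extended lets each entry name its own interrupt parent. A minimal annotated reading of the UART change above; the role of the TLMM GPIO as a wakeup source and the purpose of the wakeup byte are inferred here, not stated by the patch:

		interrupts-extended = <&intc GIC_SPI 607 0>,	/* main SE interrupt at the GIC */
				<&tlmm 48 0>;			/* TLMM GPIO 48, presumably armed as a wake interrupt in suspend */
		qcom,wakeup-byte = <0xFD>;	/* byte the driver presumably reports to clients on such a wake event */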
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 6989f326..79ac3b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -47,7 +47,7 @@
 			reg = <0x3800 0x100>;
 			regulator-name = "pm8998_s13";
 			regulator-min-microvolt = <568000>;
-			regulator-max-microvolt = <928000>;
+			regulator-max-microvolt = <996000>;
 			qcom,enable-time = <500>;
 			regulator-always-on;
 		};
@@ -74,11 +74,12 @@
 		qcom,cpr-step-quot-init-min = <11>;
 		qcom,cpr-step-quot-init-max = <12>;
 		qcom,cpr-count-mode = <0>;		/* All at once */
-		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-count-repeat = <20>;
 		qcom,cpr-down-error-step-limit = <1>;
 		qcom,cpr-up-error-step-limit = <1>;
 		qcom,cpr-corner-switch-delay-time = <1042>;
 		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
 
 		qcom,voltage-step = <4000>;
 		qcom,voltage-base = <352000>;
@@ -97,46 +98,52 @@
 			"APSS_SILVER_CPRH_STATUS_1",
 			"SILVER_SAW4_PMIC_STS";
 
-		qcom,cpr-aging-ref-voltage = <928000>;
+		qcom,cpr-aging-ref-voltage = <996000>;
 		vdd-supply = <&pm8998_s13>;
 
 		thread@1 {
 			qcom,cpr-thread-id = <1>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
 			apc0_pwrcl_vreg: regulator {
 				regulator-name = "apc0_pwrcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <17>;
+				regulator-max-microvolt = <19>;
 
-				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <8>;
-				qcom,cpr-speed-bins = <1>;
-				qcom,cpr-speed-bin-corners = <17>;
-				qcom,cpr-corners = <17>;
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <19 19>;
+				qcom,cpr-corners = <19>;
 
-				qcom,cpr-corner-fmax-map = <6 12 17>;
+				qcom,cpr-corner-fmax-map = <6 12 17 19>;
 
 				qcom,cpr-voltage-ceiling =
 					<872000  872000  872000  872000  872000
 					 872000  872000  872000  872000  872000
 					 872000  872000  872000  872000  872000
-					 872000  928000>;
+					 872000  928000  996000  996000>;
 
 				qcom,cpr-voltage-floor =
+					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  584000
 					 584000  584000  632000  632000  632000
-					 632000  672000>;
+					 632000  672000  996000  996000>,
+					/* Speed bin 1 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
+					 632000  672000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
-					 32000  32000>;
+					 32000  32000  40000  40000>;
 
 				qcom,corner-frequencies =
 					<300000000  422400000  499200000
@@ -144,7 +151,8 @@
 					 825600000  902400000  979200000
 					1056000000 1132800000 1209600000
 					1286400000 1363200000 1440000000
-					1516800000 1593600000>;
+					1516800000 1593600000 1651200000
+					1708800000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2594 2795 2576 2761 2469 2673 2198
@@ -155,22 +163,28 @@
 					 2043 2947>,
 					<2259 2389 2387 2531 2294 2464 2218
 					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
 					 1950 2632>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					<100000 100000 100000 100000>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					<100000 100000 100000 100000>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <17>;
+				qcom,cpr-aging-ref-corner = <19>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -180,39 +194,48 @@
 		thread@0 {
 			qcom,cpr-thread-id = <0>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
 			apc0_l3_vreg: regulator {
 				regulator-name = "apc0_l3_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <9>;
+				regulator-max-microvolt = <11>;
 
-				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <8>;
-				qcom,cpr-speed-bins = <1>;
-				qcom,cpr-speed-bin-corners = <9>;
-				qcom,cpr-corners = <9>;
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <11 11>;
+				qcom,cpr-corners = <11>;
 
-				qcom,cpr-corner-fmax-map = <4 7 9>;
+				qcom,cpr-corner-fmax-map = <4 7 9 11>;
 
 				qcom,cpr-voltage-ceiling =
 					<872000  872000  872000  872000  872000
-					 872000  872000  872000  928000>;
+					 872000  872000  872000  928000  996000
+					 996000>;
 
 				qcom,cpr-voltage-floor =
+					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
-					 584000  584000  632000  672000>;
+					 584000  584000  632000  672000  996000
+					 996000>,
+					/* Speed bin 1 */
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000  712000
+					 712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					<32000  32000  32000  32000  32000
-					 32000  32000  32000  32000>;
+					 32000  32000  32000  32000  40000
+					 40000>;
 
 				qcom,corner-frequencies =
 					<300000000  422400000  499200000
 					 576000000  652800000  729600000
-					 806400000  883200000  960000000>;
+					 806400000  883200000  960000000
+					1036800000 1094400000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -223,22 +246,28 @@
 					 3164 2656>,
 					<2439 2577 2552 2667 2461 2577 2394
 					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
 					 2501 2095>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					<100000 100000 100000 100000>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					<100000 100000 100000 100000>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <9>;
+				qcom,cpr-aging-ref-corner = <11>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -264,11 +293,12 @@
 		qcom,cpr-step-quot-init-min = <9>;
 		qcom,cpr-step-quot-init-max = <14>;
 		qcom,cpr-count-mode = <0>;		/* All at once */
-		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-count-repeat = <20>;
 		qcom,cpr-down-error-step-limit = <1>;
 		qcom,cpr-up-error-step-limit = <1>;
 		qcom,cpr-corner-switch-delay-time = <1042>;
 		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
 
 		qcom,apm-threshold-voltage = <800000>;
 		qcom,apm-crossover-voltage = <880000>;
@@ -296,46 +326,75 @@
 		thread@0 {
 			qcom,cpr-thread-id = <0>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
 			apc1_perfcl_vreg: regulator {
 				regulator-name = "apc1_perfcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <24>;
+				regulator-max-microvolt = <26>;
 
 				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <8>;
-				qcom,cpr-speed-bins = <1>;
-				qcom,cpr-speed-bin-corners = <22>;
-				qcom,cpr-corners = <22>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <22 24>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<22 22 22 22 22 22 22 22>,
+					/* Speed bin 1 */
+					<24 24 24 24 24 24 24 24>;
 
 				qcom,cpr-corner-fmax-map =
-					<10 17 22>;
+					/* Speed bin 0 */
+					<10 17 22>,
+					/* Speed bin 1 */
+					<10 17 24>;
 
 				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000>;
+					1056000 1056000>,
+					/* Speed bin 1 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1056000 1056000 1056000 1056000>;
 
 				qcom,cpr-voltage-floor =
+					/* Speed bin 0 */
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
 					 584000  584000  632000  632000  632000
 					 632000  632000  672000  712000  712000
-					 772000  772000>;
+					 772000  772000>,
+					/* Speed bin 1 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000  772000  772000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  40000  40000  40000
-					 40000  40000>;
+					 40000  40000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  729600000
 					 806400000  883200000  960000000
@@ -343,7 +402,16 @@
 					1267200000 1344000000 1420800000
 					1497600000 1574400000 1651200000
 					1728000000 1804800000 1881600000
-					1958400000>;
+					1958400000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000 2035200000 2092800000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -367,9 +435,12 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <22>;
+				qcom,cpr-aging-ref-corner = <22 24>;
 				qcom,cpr-aging-ro-scaling-factor = <1700>;
 				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index e56073c..74bb133 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -107,8 +107,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -116,6 +116,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_sharp_4k_dsc_video>;
 		vddio-supply = <&pm8998_l14>;
@@ -130,8 +131,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -139,6 +140,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_sharp_4k_dsc_cmd>;
 		vddio-supply = <&pm8998_l14>;
@@ -153,8 +155,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -162,6 +164,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_sharp_1080_cmd>;
 		vddio-supply = <&pm8998_l14>;
@@ -176,8 +179,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -185,6 +188,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_dual_sharp_1080_120hz_cmd>;
 		vddio-supply = <&pm8998_l14>;
@@ -222,8 +226,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -231,6 +235,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_dual_nt35597_truly_cmd>;
 		vddio-supply = <&pm8998_l14>;
@@ -245,8 +250,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -254,6 +259,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_cmd>;
 		vddio-supply = <&pm8998_l14>;
@@ -268,8 +274,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -277,6 +283,7 @@
 		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
 		qcom,platform-te-gpio = <&tlmm 10 0>;
 		qcom,platform-reset-gpio = <&tlmm 6 0>;
+		qcom,panel-mode-gpio = <&tlmm 52 0>;
 
 		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_video>;
 		vddio-supply = <&pm8998_l14>;
@@ -284,6 +291,78 @@
 		ibb-supply = <&ibb_regulator>;
 	};
 
+	dsi_sim_vid_display: qcom,dsi-display@8 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_vid>;
+	};
+
+	dsi_dual_sim_vid_display: qcom,dsi-display@9 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_vid>;
+	};
+
+	dsi_sim_cmd_display: qcom,dsi-display@10 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_cmd>;
+	};
+
+	dsi_dual_sim_cmd_display: qcom,dsi-display@11 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_cmd>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -292,47 +371,94 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_dual_nt35597_truly_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_video_display>;
 };
 
 &dsi_dual_nt35597_truly_video {
-	qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_dual_nt35597_truly_cmd {
-	qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_nt35597_truly_dsc_cmd {
-	qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,display-topology = <2 2 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_nt35597_truly_dsc_video {
-	qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,display-topology = <2 2 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_sharp_4k_dsc_video {
-	qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
-	qcom,mdss-dsi-t-clk-post = <0x0a>;
-	qcom,mdss-dsi-t-clk-pre = <0x1e>;
+	qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x27>;
+	qcom,display-topology = <2 2 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_sharp_4k_dsc_cmd {
-	qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
-	qcom,mdss-dsi-t-clk-post = <0x0a>;
-	qcom,mdss-dsi-t-clk-pre = <0x1e>;
+	qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x27>;
+	qcom,display-topology = <2 2 2>;
+	qcom,default-topology-index = <0>;
 };
 
 &dsi_dual_sharp_1080_120hz_cmd {
-	qcom,mdss-dsi-panel-timings = [00 24 09 09 26 24 09 09 06 03 04];
+	qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09 09 06 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0f>;
 	qcom,mdss-dsi-t-clk-pre = <0x36>;
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
+};
+
+&dsi_sharp_1080_cmd {
+	qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+	qcom,default-topology-index = <0>;
+};
+
+&dsi_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
 };
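Note: a minimal annotated copy of one override above, assuming the common SDE reading of the three qcom,display-topology cells as <layer mixers, DSC compression encoders, interfaces>, with qcom,default-topology-index selecting which entry is used at boot:

&dsi_dual_nt35597_truly_video {
	/* Comments are illustrative; values mirror the patch above */
	qcom,display-topology = <2 0 2>,	/* 2 LMs, no DSC, 2 DSI interfaces */
				<1 0 2>;	/* single-LM fallback topology */
	qcom,default-topology-index = <0>;	/* start with the first entry */
};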
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index cb5d924..df2e0c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -37,8 +37,8 @@
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
-		iommus = <&apps_smmu 0x880>, <&apps_smmu 0x888>,
-			<&apps_smmu 0xc80>, <&apps_smmu 0xc88>;
+		iommus = <&apps_smmu 0x880 0x8>,
+			<&apps_smmu 0xc80 0x8>;
 
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -97,8 +97,6 @@
 					1 5 9 13>;
 		qcom,sde-sspp-excl-rect = <1 1 1 1
 						1 1 1 1>;
-		qcom,sde-sspp-smart-dma-priority = <5 6 7 8 1 2 3 4>;
-		qcom,sde-smart-dma-rev = "smart_dma_v2";
 
 		qcom,sde-mixer-pair-mask = <2 1 6 0 0 3>;
 
@@ -118,10 +116,12 @@
 		qcom,sde-wb-linewidth = <4096>;
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-version = <0x200>;
 		qcom,sde-panic-per-pipe;
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
 		qcom,sde-has-dim-layer;
+		qcom,sde-has-idle-pc;
 		qcom,sde-max-bw-low-kbps = <9600000>;
 		qcom,sde-max-bw-high-kbps = <9600000>;
 		qcom,sde-dram-channels = <2>;
@@ -178,7 +178,6 @@
 	};
 
 	sde_rscc: qcom,sde_rscc@af20000 {
-		status = "disabled";
 		cell-index = <0>;
 		compatible = "qcom,sde-rsc";
 		reg = <0xaf20000 0x1c44>,
@@ -187,13 +186,16 @@
 		qcom,sde-rsc-version = <1>;
 
 		vdd-supply = <&mdss_core_gdsc>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
-		clock-names = "iface_clk", "vsync_clk";
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>;
+		clock-names = "vsync_clk", "iface_clk";
 		clock-rate = <0 0>;
 
 		qcom,sde-dram-channels = <2>;
 
+		mboxes = <&disp_rsc 0>;
+		mbox-names = "disp_rsc";
+
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
 			qcom,msm-bus,name = "disp_rsc";
@@ -208,7 +210,6 @@
 	};
 
 	mdss_rotator: qcom,mdss_rotator@ae00000 {
-		status = "disabled";
 		compatible = "qcom,sde_rotator";
 		reg = <0x0ae00000 0xac000>,
 		      <0x0aeb8000 0x3000>;
@@ -219,8 +220,6 @@
 
 		qcom,mdss-rot-mode = <1>;
 		qcom,mdss-highest-bank-bit = <0x2>;
-		qcom,sde-ubwc-malsize = <1>;
-		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -238,12 +237,10 @@
 			<&clock_gcc GCC_DISP_AHB_CLK>,
 			<&clock_gcc GCC_DISP_AXI_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_ROT_CLK_SRC>,
 			<&clock_dispcc DISP_CC_MDSS_ROT_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>;
 		clock-names = "gcc_iface", "gcc_bus",
-			"iface_clk", "rot_core_clk",
-			"rot_clk", "axi_clk";
+			"iface_clk", "rot_clk", "axi_clk";
 
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
@@ -260,15 +257,12 @@
 
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
-			iommus = <&apps_smmu 0x1090>;
-			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+			iommus = <&apps_smmu 0x1090 0x0>;
 		};
 
 		smmu_rot_sec: qcom,smmu_rot_sec_cb {
-			status = "disabled";
 			compatible = "qcom,smmu_sde_rot_sec";
-			iommus = <&apps_smmu 0x1091>;
-			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+			iommus = <&apps_smmu 0x1091 0x0>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index c80343a..aac63ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -303,6 +303,13 @@
 		qcom,reset-ep-after-lpm-resume;
 	};
 
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&apps_smmu 0x182c>;
+		qcom,usb-audio-stream-id = <0xc>;
+		qcom,usb-audio-intr-num = <2>;
+	};
+
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 4fdf383..efd8c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -16,3 +16,7 @@
 	model = "Qualcomm Technologies, Inc. SDM845 V2";
 	qcom,msm-id = <321 0x20000>;
 };
+
+&spmi_debug_bus {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index b9dc816..4fe9282 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -97,9 +97,8 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
 			iommus =
-				<&apps_smmu 0x10a0>,
-				<&apps_smmu 0x10a8>,
-				<&apps_smmu 0x10b0>;
+				<&apps_smmu 0x10a0 0x8>,
+				<&apps_smmu 0x10b0 0x0>;
 			buffer-types = <0xfff>;
 			virtual-addr-pool = <0x70800000 0x6f800000>;
 		};
@@ -108,10 +107,8 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_bitstream";
 			iommus =
-				<&apps_smmu 0x10a1>,
-				<&apps_smmu 0x10a9>,
-				<&apps_smmu 0x10a5>,
-				<&apps_smmu 0x10ad>;
+				<&apps_smmu 0x10a1 0x8>,
+				<&apps_smmu 0x10a5 0x8>;
 			buffer-types = <0x241>;
 			virtual-addr-pool = <0x4b000000 0x25800000>;
 			qcom,secure-context-bank;
@@ -121,8 +118,7 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_pixel";
 			iommus =
-				<&apps_smmu 0x10a3>,
-				<&apps_smmu 0x10ab>;
+				<&apps_smmu 0x10a3 0x8>;
 			buffer-types = <0x106>;
 			virtual-addr-pool = <0x25800000 0x25800000>;
 			qcom,secure-context-bank;
@@ -132,9 +128,8 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_non_pixel";
 			iommus =
-				<&apps_smmu 0x10a4>,
-				<&apps_smmu 0x10ac>,
-				<&apps_smmu 0x10b4>;
+				<&apps_smmu 0x10a4 0x8>,
+				<&apps_smmu 0x10b4 0x0>;
 			buffer-types = <0x480>;
 			virtual-addr-pool = <0x1000000 0x24800000>;
 			qcom,secure-context-bank;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 54e0162..67e0493 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -34,6 +34,7 @@
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
 		ufshc2 = &ufshc_card; /* Removable UFS slot */
+		pci-domain0 = &pcie0;
 		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
 	};
 
@@ -52,6 +53,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs0>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_0>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -85,6 +87,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs0>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_100>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			L2_100: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -112,6 +115,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs0>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_200>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			L2_200: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -139,6 +143,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs0>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_300>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			L2_300: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -166,6 +171,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs1>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_400>;
+			sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
 			L2_400: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -193,6 +199,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs1>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_500>;
+			sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
 			L2_500: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -220,6 +227,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs1>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_600>;
+			sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
 			L2_600: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -247,6 +255,7 @@
 			qcom,lmh-dcvs = <&lmh_dcvs1>;
 			#cooling-cells = <2>;
 			next-level-cache = <&L2_700>;
+			sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
 			L2_700: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -302,6 +311,115 @@
 		};
 	};
 
+	energy-costs {
+		CPU_COST_0: core-cost0 {
+			busy-cost-data = <
+				 92   34 /*  300000 */
+				129   40 /*  422400 */
+				153   43 /*  499200 */
+				177   48 /*  576000 */
+				200   52 /*  652800 */
+				230   58 /*  748800 */
+				253   64 /*  825600 */
+				277   70 /*  902400 */
+				301   76 /*  979200 */
+				324   83 /* 1056000 */
+				348   90 /* 1132800 */
+				371   98 /* 1209600 */
+				395  105 /* 1286400 */
+				419  114 /* 1363200 */
+				442  123 /* 1440000 */
+				466  135 /* 1516800 */
+				490  152 /* 1593600 */
+			>;
+			idle-cost-data = <
+				22 18 14 12
+			>;
+		};
+		CPU_COST_1: core-cost1 {
+			busy-cost-data = <
+				156  240 /*  300000 */
+				220  247 /*  422400 */
+				261  252 /*  499200 */
+				301  257 /*  576000 */
+				341  264 /*  652800 */
+				381  272 /*  729600 */
+				421  281 /*  806400 */
+				461  292 /*  883200 */
+				501  306 /*  960000 */
+				542  324 /* 1036800 */
+				582  346 /* 1113600 */
+				622  373 /* 1190400 */
+				662  407 /* 1267200 */
+				702  450 /* 1344000 */
+				742  504 /* 1420800 */
+				783  570 /* 1497600 */
+				823  649 /* 1574400 */
+				863  743 /* 1651200 */
+				903  849 /* 1728000 */
+				943  960 /* 1804800 */
+				983 1062 /* 1881600 */
+			       1024 1131 /* 1958400 */
+			>;
+			idle-cost-data = <
+				520 500 480 460
+			>;
+		};
+		CLUSTER_COST_0: cluster-cost0 {
+			busy-cost-data = <
+				 92   3 /*  300000 */
+				129   4 /*  422400 */
+				153   4 /*  499200 */
+				177   4 /*  576000 */
+				200   5 /*  652800 */
+				230   5 /*  748800 */
+				253   6 /*  825600 */
+				277   7 /*  902400 */
+				301   7 /*  979200 */
+				324   8 /* 1056000 */
+				348   9 /* 1132800 */
+				371   9 /* 1209600 */
+				395  10 /* 1286400 */
+				419  11 /* 1363200 */
+				442  12 /* 1440000 */
+				466  13 /* 1516800 */
+				490  15 /* 1593600 */
+			>;
+			idle-cost-data = <
+				4 3 2 1
+			>;
+		};
+		CLUSTER_COST_1: cluster-cost1 {
+			busy-cost-data = <
+				156  24 /*  300000 */
+				220  24 /*  422400 */
+				261  25 /*  499200 */
+				301  25 /*  576000 */
+				341  26 /*  652800 */
+				381  27 /*  729600 */
+				421  28 /*  806400 */
+				461  29 /*  883200 */
+				501  30 /*  960000 */
+				542  32 /* 1036800 */
+				582  34 /* 1113600 */
+				622  37 /* 1190400 */
+				662  40 /* 1267200 */
+				702  45 /* 1344000 */
+				742  50 /* 1420800 */
+				783  57 /* 1497600 */
+				823  64 /* 1574400 */
+				863  74 /* 1651200 */
+				903  84 /* 1728000 */
+				943  96 /* 1804800 */
+				983 106 /* 1881600 */
+			       1024 113 /* 1958400 */
+			>;
+			idle-cost-data = <
+				4 3 2 1
+			>;
+		};
+	}; /* energy-costs */
+
 	psci {
 		compatible = "arm,psci-1.0";
 		method = "smc";
@@ -553,6 +671,7 @@
 		qcom,fuse-disable-bit = <12>;
 		#address-cells = <2>;
 		#size-cells = <0>;
+		status = "disabled";
 
 		qcom,pm8998-debug@0 {
 			compatible = "qcom,spmi-pmic";
@@ -757,6 +876,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -774,6 +894,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -819,6 +940,13 @@
 		interrupts = <1 5 4>;
 	};
 
+	clock_rpmh: qcom,rpmhclk {
+		compatible = "qcom,rpmh-clk-sdm845";
+		#clock-cells = <1>;
+		mboxes = <&apps_rsc 0>;
+		mbox-names = "apps";
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdm845", "syscon";
 		reg = <0x100000 0x1f0000>;
@@ -878,6 +1006,11 @@
 		#reset-cells = <1>;
 	};
 
+	cpucc_debug: syscon@17970018 {
+		compatible = "syscon";
+		reg = <0x17970018 0x4>;
+	};
+
 	clock_cpucc: qcom,cpucc@0x17d41000 {
 		compatible = "qcom,clk-cpu-osm";
 		reg = <0x17d41000 0x1400>,
@@ -993,13 +1126,6 @@
 		#reset-cells = <1>;
 	};
 
-	clock_rpmh: qcom,rpmhclk {
-		compatible = "qcom,rpmh-clk-sdm845";
-		#clock-cells = <1>;
-		mboxes = <&apps_rsc 0>;
-		mbox-names = "apps";
-	};
-
 	clock_debug: qcom,cc-debug@100000 {
 		compatible = "qcom,debugcc-sdm845";
 		qcom,cc-count = <5>;
@@ -1008,6 +1134,7 @@
 		qcom,camcc = <&clock_camcc>;
 		qcom,dispcc = <&clock_dispcc>;
 		qcom,gpucc = <&clock_gpucc>;
+		qcom,cpucc = <&cpucc_debug>;
 		clock-names = "xo_clk_src";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		#clock-cells = <1>;
@@ -1020,6 +1147,29 @@
 		mbox-names = "qdss_clk";
 	};
 
+	ufs_ice: ufsice@1d90000 {
+		compatible = "qcom,ice";
+		reg = <0x1d90000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names = "ufs_core_clk", "bus_clk",
+				"iface_clk", "ice_core_clk";
+		clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			 <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+			 <&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			 <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+		qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+		vdd-hba-supply = <&ufs_phy_gdsc>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xda8>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1032,17 +1182,18 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
 
-	ufshc_mem: ufshc_mem@1d84000 {
+	ufshc_mem: ufshc@1d84000 {
 		compatible = "qcom,ufshc";
 		reg = <0x1d84000 0x2500>;
 		interrupts = <0 265 0>;
 		phys = <&ufsphy_mem>;
 		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <2>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -1057,13 +1208,12 @@
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk",
 			"rx_lane1_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_PHY_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
-			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1152,7 +1302,7 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
@@ -1176,13 +1326,12 @@
 			"ref_clk",
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_CARD_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_CLK>,
+			<&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
-			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
@@ -1289,9 +1438,12 @@
 		      <0x1f65000 0x008>,
 		      <0x1f64000 0x008>,
 		      <0x4180000 0x020>,
-		      <0xc2b0000 0x004>;
+		      <0xc2b0000 0x004>,
+		      <0xb2e0100 0x004>,
+		      <0x4180044 0x004>;
 		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
-			    "halt_nc", "rmb_base", "restart_reg";
+			    "halt_nc", "rmb_base", "restart_reg",
+			    "pdc_sync", "alt_reset";
 
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			 <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
@@ -1418,6 +1570,7 @@
 	};
 
 	slim_qca: slim@17240000 {
+		status = "ok";
 		cell-index = <3>;
 		compatible = "qcom,slim-ngd";
 		reg = <0x17240000 0x2c000>,
@@ -1425,6 +1578,14 @@
 		reg-names = "slimbus_physical", "slimbus_bam_physical";
 		interrupts = <0 291 0>, <0 292 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+
+		/* Slimbus Slave DT for WCN3990 */
+		btfmslim_codec: wcn3990 {
+			compatible = "qcom,btfmslim_slave";
+			elemental-addr = [00 01 20 02 17 02];
+			qcom,btfm-slim-ifd = "btfmslim_slave_ifd";
+			qcom,btfm-slim-ifd-elemental-addr = [00 00 20 02 17 02];
+		};
 	};
 
 	eud: qcom,msm-eud@88e0000 {
@@ -1536,76 +1697,64 @@
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1401>,
-				 <&apps_smmu 0x1421>;
+			iommus = <&apps_smmu 0x1401 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb2 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1402>,
-				 <&apps_smmu 0x1422>;
+			iommus = <&apps_smmu 0x1402 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb3 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1403>,
-				 <&apps_smmu 0x1423>;
+			iommus = <&apps_smmu 0x1403 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb4 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1404>,
-				 <&apps_smmu 0x1424>;
+			iommus = <&apps_smmu 0x1404 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb5 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1405>,
-				 <&apps_smmu 0x1425>;
+			iommus = <&apps_smmu 0x1405 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb6 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1406>,
-				 <&apps_smmu 0x1426>;
+			iommus = <&apps_smmu 0x1406 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb7 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1407>,
-				 <&apps_smmu 0x1427>;
+			iommus = <&apps_smmu 0x1407 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb8 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1408>,
-				 <&apps_smmu 0x1428>;
+			iommus = <&apps_smmu 0x1408 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb9 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x1409>,
-				 <&apps_smmu 0x1419>,
-				 <&apps_smmu 0x1429>;
+			iommus = <&apps_smmu 0x1409 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb10 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x140A>,
-				 <&apps_smmu 0x141A>,
-				 <&apps_smmu 0x142A>;
+			iommus = <&apps_smmu 0x140A 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb11 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
-			iommus = <&apps_smmu 0x1823>;
+			iommus = <&apps_smmu 0x1823 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb12 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
-			iommus = <&apps_smmu 0x1824>;
+			iommus = <&apps_smmu 0x1824 0x0>;
 		};
 	};
 
@@ -1640,6 +1789,11 @@
 			compatible = "qcom,msm-imem-kaslr_offset";
 			reg = <0x6d0 12>;
 		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
 	};
 
 	qcom,venus@aae0000 {
@@ -2147,6 +2301,65 @@
 		hyplog-size-offset = <0x414>;
 	};
 
+	qcom_cedev: qcedev@1de0000 {
+		compatible = "qcom,qcedev";
+		reg = <0x1de0000 0x20000>,
+			<0x1dc4000 0x24000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 272 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,bam-ee = <0>;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<125 512 0 0>,
+				<125 512 393600 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc GCC_CE1_CLK>,
+			 <&clock_gcc GCC_CE1_CLK>,
+			 <&clock_gcc GCC_CE1_AHB_CLK>,
+			 <&clock_gcc GCC_CE1_AXI_CLK>;
+		qcom,ce-opp-freq = <171430000>;
+	};
+
+	qcom_crypto: qcrypto@1de0000 {
+		compatible = "qcom,qcrypto";
+		reg = <0x1de0000 0x20000>,
+			 <0x1dc4000 0x24000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 272 0>;
+		qcom,bam-pipe-pair = <2>;
+		qcom,ce-hw-instance = <0>;
+		qcom,ce-device = <0>;
+		qcom,bam-ee = <0>;
+		qcom,ce-hw-shared;
+		qcom,clk-mgmt-sus-res;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<125 512 0 0>,
+			<125 512 393600 393600>;
+		clock-names = "core_clk_src", "core_clk",
+				"iface_clk", "bus_clk";
+		clocks = <&clock_gcc GCC_CE1_CLK>,
+			 <&clock_gcc GCC_CE1_CLK>,
+			 <&clock_gcc GCC_CE1_AHB_CLK>,
+			 <&clock_gcc GCC_CE1_AXI_CLK>;
+		qcom,ce-opp-freq = <171430000>;
+		qcom,use-sw-aes-cbc-ecb-ctr-algo;
+		qcom,use-sw-aes-xts-algo;
+		qcom,use-sw-aes-ccm-algo;
+		qcom,use-sw-ahash-algo;
+		qcom,use-sw-aead-algo;
+		qcom,use-sw-hmac-algo;
+	};
+
 	qcom,msm_gsi {
 		compatible = "qcom,msm_gsi";
 	};
@@ -2300,18 +2513,18 @@
 
 		ipa_smmu_ap: ipa_smmu_ap {
 			compatible = "qcom,ipa-smmu-ap-cb";
-			iommus = <&apps_smmu 0x720>;
+			iommus = <&apps_smmu 0x720 0x0>;
 			qcom,iova-mapping = <0x20000000 0x40000000>;
 		};
 
 		ipa_smmu_wlan: ipa_smmu_wlan {
 			compatible = "qcom,ipa-smmu-wlan-cb";
-			iommus = <&apps_smmu 0x721>;
+			iommus = <&apps_smmu 0x721 0x0>;
 		};
 
 		ipa_smmu_uc: ipa_smmu_uc {
 			compatible = "qcom,ipa-smmu-uc-cb";
-			iommus = <&apps_smmu 0x722>;
+			iommus = <&apps_smmu 0x722 0x0>;
 			qcom,iova-mapping = <0x40000000 0x20000000>;
 		};
 	};
@@ -2355,7 +2568,7 @@
 
 	cmd_db: qcom,cmd-db@861e0000 {
 		compatible = "qcom,cmd-db";
-		reg = <0x861e0000 0x4000>;
+		reg = <0xc3f000c 8>;
 	};
 
 	dcc: dcc_v2@10a2000 {
@@ -2378,8 +2591,7 @@
 		      <0xa0000000 0x10000000>,
 		      <0xb0000000 0x10000>;
 		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
-		iommus = <&apps_smmu 0x0040>,
-			 <&apps_smmu 0x0041>;
+		iommus = <&apps_smmu 0x0040 0x1>;
 		interrupts = <0 414 0 /* CE0 */ >,
 			     <0 415 0 /* CE1 */ >,
 			     <0 416 0 /* CE2 */ >,
@@ -3422,14 +3634,14 @@
 &clock_cpucc {
 	lmh_dcvs0: qcom,limits-dcvs@0 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <0>;
 		#thermal-sensor-cells = <0>;
 	};
 
 	lmh_dcvs1: qcom,limits-dcvs@1 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <1>;
 		#thermal-sensor-cells = <0>;
 	};
@@ -3552,6 +3764,48 @@
 #include "sdm845-vidc.dtsi"
 #include "sdm845-pm.dtsi"
 #include "sdm845-pinctrl.dtsi"
+#include "sdm845-pcie.dtsi"
 #include "sdm845-audio.dtsi"
 #include "sdm845-gpu.dtsi"
 #include "sdm845-usb.dtsi"
+
+&pm8998_temp_alarm {
+	cooling-maps {
+		trip0_cpu0 {
+			trip = <&pm8998_trip0>;
+			cooling-device = <&CPU0 21 21>;
+		};
+		trip0_cpu4 {
+			trip = <&pm8998_trip0>;
+			cooling-device = <&CPU4 21 21>;
+		};
+		trip1_cpu1 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU1 22 22>;
+		};
+		trip1_cpu2 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU2 22 22>;
+		};
+		trip1_cpu3 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU3 22 22>;
+		};
+		trip1_cpu4 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU4 22 22>;
+		};
+		trip1_cpu5 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU5 22 22>;
+		};
+		trip1_cpu6 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU6 22 22>;
+		};
+		trip1_cpu7 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU7 22 22>;
+		};
+	};
+};
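Note: a minimal annotated copy of one cooling map entry above, assuming the standard thermal binding meaning of the two cooling cells (the CPU nodes expose #cooling-cells = <2>) as a <min max> cooling-state clamp applied while the trip is crossed:

		trip1_cpu1 {
			trip = <&pm8998_trip1>;
			/* hold CPU1's cooling device at exactly state 22 (min = max = 22) */
			cooling-device = <&CPU1 22 22>;
		};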
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
new file mode 100644
index 0000000..33c5e97
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -0,0 +1,55 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&qupv3_se10_i2c {
+	smb1355: qcom,smb1355@8 {
+		compatible = "qcom,i2c-pmic";
+		reg = <0x8>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt_names = "smb1355";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+
+		smb1355_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		smb1355_charger: qcom,smb1355-charger@1000 {
+			compatible = "qcom,smb1355";
+			qcom,pmic-revid = <&smb1355_revid>;
+			reg = <0x1000 0x700>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&smb1355>;
+			status = "disabled";
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "chg-state-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "wdog-bark";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fa6bae8..9b5de00 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -6,6 +6,7 @@
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
@@ -16,14 +17,13 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_SCHED_HMP=y
-CONFIG_SCHED_HMP_CSTATE_AWARE=y
 CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
@@ -49,6 +49,7 @@
 CONFIG_ARCH_SDM845=y
 CONFIG_ARCH_SDM830=y
 CONFIG_PCI=y
+CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
@@ -80,6 +81,7 @@
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -166,6 +168,7 @@
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
 CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
 CONFIG_IP_NF_MATCH_TTL=y
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
@@ -181,6 +184,7 @@
 CONFIG_IP_NF_ARP_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
@@ -212,6 +216,7 @@
 CONFIG_NET_ACT_MIRRED=y
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
@@ -238,6 +243,7 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -298,8 +304,8 @@
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_SMB2=y
-CONFIG_SMB138X_CHARGER=y
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
@@ -336,6 +342,7 @@
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
 CONFIG_DRM=y
 CONFIG_DRM_SDE_EVTLOG_DEBUG=y
 CONFIG_DRM_SDE_RSC=y
@@ -445,6 +452,8 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
@@ -473,10 +482,13 @@
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_APSS_CORE_EA=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -536,7 +548,10 @@
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index a71aa64..615150a 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -5,6 +5,7 @@
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
@@ -20,14 +21,13 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_SCHED_HMP=y
-CONFIG_SCHED_HMP_CSTATE_AWARE=y
 CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
@@ -54,12 +54,14 @@
 CONFIG_ARCH_SDM845=y
 CONFIG_ARCH_SDM830=y
 CONFIG_PCI=y
+CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
@@ -85,6 +87,7 @@
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -172,6 +175,7 @@
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
 CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
 CONFIG_IP_NF_MATCH_TTL=y
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
@@ -187,6 +191,7 @@
 CONFIG_IP_NF_ARP_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
@@ -220,6 +225,7 @@
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
 CONFIG_MSM_BT_POWER=y
@@ -248,6 +254,7 @@
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -306,8 +313,8 @@
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_QPNP_SMB2=y
-CONFIG_SMB138X_CHARGER=y
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
@@ -462,6 +469,8 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_MSM_GLADIATOR_HANG_DETECT=y
@@ -493,11 +502,14 @@
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -596,6 +608,7 @@
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
@@ -605,7 +618,10 @@
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffbb9a5..875545d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
 #include <asm/bug.h>
 #include <asm/proc-fns.h>
 
+#include <asm/bug.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable-prot.h>
@@ -172,6 +173,34 @@
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+	pteval_t old = pte_val(*ptep);
+	pteval_t new = pte_val(pte);
+
+	/* Only problematic if valid -> valid */
+	if (!(old & new & PTE_VALID))
+		goto pte_ok;
+
+	/* Changing attributes should go via an invalid entry */
+	if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+		goto pte_bad;
+
+	/* Change of OA is only an issue if one mapping is writable */
+	if (!(old & new & PTE_RDONLY) &&
+	    WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+		goto pte_bad;
+
+	goto pte_ok;
+
+pte_bad:
+	*ptep = __pte(0);
+	dsb(ishst);
+	asm("tlbi	vmalle1is");
+	dsb(ish);
+	isb();
+pte_ok:
+#endif
+
 	*ptep = pte;
 
 	/*
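For readers tracing the strict break-before-make check added to set_pte() above: a live (valid to valid) PTE update is only tolerated when the memory attributes stay the same and, if either mapping is writable, the output address stays the same too; anything else is warned about, the entry is zapped and the TLB invalidated. The following standalone sketch models that predicate with a simplified bit layout (valid at bit 0, attribute index at bits 2-4, read-only at bit 7, PFN from bit 12); the layout and the sample values are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>

#define PTE_VALID        (1UL << 0)
#define PTE_ATTRINDX     (7UL << 2)
#define PTE_RDONLY       (1UL << 7)
#define PTE_PFN(p)       ((p) >> 12)

/* Returns 1 when the old->new transition should have gone through an
 * invalid entry first, i.e. it violates break-before-make. */
static int violates_break_before_make(unsigned long old, unsigned long new)
{
	if (!(old & new & PTE_VALID))
		return 0;                /* not a live->live update */

	if ((old & PTE_ATTRINDX) != (new & PTE_ATTRINDX))
		return 1;                /* attribute change */

	if (!(old & new & PTE_RDONLY) && PTE_PFN(old) != PTE_PFN(new))
		return 1;                /* OA change with a writable side */

	return 0;
}

int main(void)
{
	unsigned long old = 0x40001000UL | PTE_VALID | PTE_RDONLY;
	unsigned long new = 0x40002000UL | PTE_VALID | PTE_RDONLY;

	printf("%d\n", violates_break_before_make(old, new));               /* 0: both read-only */
	printf("%d\n", violates_break_before_make(old & ~PTE_RDONLY, new)); /* 1: writable side, new OA */
	return 0;
}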
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0c4a5ee..da845fd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -189,7 +189,7 @@
 	 * don't attempt to dump non-kernel addresses or
 	 * values that are probably just small negative numbers
 	 */
-	if (addr < PAGE_OFFSET || addr > -256UL)
+	if (addr < KIMAGE_VADDR || addr > -256UL)
 		return;
 
 	printk("\n%s: %#lx:\n", name, addr);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 40e775a..837bbab 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1953,20 +1953,32 @@
  *
  * Attaches specified io address space mapping to the provided device,
  * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. More than one client might be attached to
- * the same io address space mapping.
+ * IOMMU aware version. Only one device in an iommu_group may use this
+ * function.
  */
 int arm_iommu_attach_device(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
 	int err;
 	int s1_bypass = 0, is_fast = 0;
+	struct iommu_group *group;
+
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return -ENODEV;
+	}
+
+	if (iommu_get_domain_for_dev(dev)) {
+		dev_err(dev, "Device already attached to other iommu_domain\n");
+		return -EINVAL;
+	}
 
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
 	if (is_fast)
 		return fast_smmu_attach_device(dev, mapping);
 
-	err = iommu_attach_device(mapping->domain, dev);
+	err = iommu_attach_group(mapping->domain, group);
 	if (err)
 		return err;
 
@@ -1994,6 +2006,7 @@
 {
 	struct dma_iommu_mapping *mapping;
 	int is_fast, s1_bypass = 0;
+	struct iommu_group *group;
 
 	mapping = to_dma_iommu_mapping(dev);
 	if (!mapping) {
@@ -2013,7 +2026,13 @@
 	if (msm_dma_unmap_all_for_dev(dev))
 		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
 
-	iommu_detach_device(mapping->domain, dev);
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return;
+	}
+
+	iommu_detach_group(mapping->domain, group);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	dev->archdata.mapping = NULL;
 	if (!s1_bypass)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 804d2a2..dd6a18b 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -80,7 +80,7 @@
 		}
 
 		/* Sorted insert of 75th percentile into buf2 */
-		for (k = 0; k < i; ++k) {
+		for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
 			if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
 				l = min_t(unsigned int,
 					  i, ARRAY_SIZE(buf2) - 1);
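The added `k < ARRAY_SIZE(buf2)` bound keeps the 75th-percentile insert from indexing past the end of buf2 once more samples have been taken than the buffer holds. A small standalone sketch of the same bounded sorted insert, with a made-up buffer size and values:

#include <stdio.h>

#define BUF2_SIZE 4

static unsigned int buf2[BUF2_SIZE] = { 10, 20, 30, 40 };

static void bounded_sorted_insert(unsigned int val, unsigned int used)
{
	unsigned int k, l;

	/* scan only entries that both exist and fit in the buffer */
	for (k = 0; k < used && k < BUF2_SIZE; ++k) {
		if (val < buf2[k]) {
			/* clamp the shift so the last slot is never overrun */
			l = used < BUF2_SIZE - 1 ? used : BUF2_SIZE - 1;
			for (; l > k; --l)
				buf2[l] = buf2[l - 1];
			buf2[k] = val;
			break;
		}
	}
}

int main(void)
{
	unsigned int i;

	bounded_sorted_insert(25, 6);    /* 'used' claims more than the buffer holds */
	for (i = 0; i < BUF2_SIZE; i++)
		printf("%u ", buf2[i]);
	printf("\n");                    /* 10 20 25 30 */
	return 0;
}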
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 6430bff..5c429d7 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -257,7 +257,7 @@
 	else if ((prog_req.fr1 && prog_req.frdefault) ||
 		 (prog_req.single && !prog_req.frdefault))
 		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
-		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+		state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
 					  cpu_has_mips_r2_r6) ?
 					  FP_FR1 : FP_FR0;
 	else if (prog_req.fr1)
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index de63d36..732d617 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -244,9 +244,6 @@
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
 	int reg;
-	struct thread_info *ti = task_thread_info(p);
-	unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-	struct pt_regs *regs = (struct pt_regs *)ksp - 1;
 #if (KGDB_GDB_REG_SIZE == 32)
 	u32 *ptr = (u32 *)gdb_regs;
 #else
@@ -254,25 +251,46 @@
 #endif
 
 	for (reg = 0; reg < 16; reg++)
-		*(ptr++) = regs->regs[reg];
+		*(ptr++) = 0;
 
 	/* S0 - S7 */
-	for (reg = 16; reg < 24; reg++)
-		*(ptr++) = regs->regs[reg];
+	*(ptr++) = p->thread.reg16;
+	*(ptr++) = p->thread.reg17;
+	*(ptr++) = p->thread.reg18;
+	*(ptr++) = p->thread.reg19;
+	*(ptr++) = p->thread.reg20;
+	*(ptr++) = p->thread.reg21;
+	*(ptr++) = p->thread.reg22;
+	*(ptr++) = p->thread.reg23;
 
 	for (reg = 24; reg < 28; reg++)
 		*(ptr++) = 0;
 
 	/* GP, SP, FP, RA */
-	for (reg = 28; reg < 32; reg++)
-		*(ptr++) = regs->regs[reg];
+	*(ptr++) = (long)p;
+	*(ptr++) = p->thread.reg29;
+	*(ptr++) = p->thread.reg30;
+	*(ptr++) = p->thread.reg31;
 
-	*(ptr++) = regs->cp0_status;
-	*(ptr++) = regs->lo;
-	*(ptr++) = regs->hi;
-	*(ptr++) = regs->cp0_badvaddr;
-	*(ptr++) = regs->cp0_cause;
-	*(ptr++) = regs->cp0_epc;
+	*(ptr++) = p->thread.cp0_status;
+
+	/* lo, hi */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * BadVAddr, Cause
+	 * Ideally these would come from the last exception frame up the stack
+	 * but that requires unwinding, otherwise we can't know much for sure.
+	 */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * PC
+	 * use return address (RA), i.e. the moment after return from resume()
+	 */
+	*(ptr++) = p->thread.reg31;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1fb317f..b6802b9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -673,6 +673,14 @@
 	return pte_pfn(pte);
 }
 
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
@@ -688,13 +696,6 @@
 	return pte_young(pte);
 }
 
-static inline unsigned long pmd_write(pmd_t pmd)
-{
-	pte_t pte = __pte(pmd_val(pmd));
-
-	return pte_write(pte);
-}
-
 static inline unsigned long pmd_trans_huge(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 37aa537..bd7e2aa 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1495,7 +1495,7 @@
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2..6bf09f5 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -983,6 +983,18 @@
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	/*
+	 * When resuming from suspend-to-ram, this function can be indirectly
+	 * called from early CPU startup code while the CPU is in real mode,
+	 * which would fail miserably.  Make sure the stack pointer is a
+	 * virtual address.
+	 *
+	 * This check isn't as accurate as virt_addr_valid(), but it should be
+	 * good enough for this purpose, and it's fast.
+	 */
+	if (unlikely((long)__builtin_frame_address(0) >= 0))
+		return;
+
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
 
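The guard added above works because x86-64 kernel virtual addresses sit in the upper half of the address space, so a valid kernel stack pointer is negative when viewed as a signed long; a non-negative frame address means the CPU is still on an early, real-mode/identity-mapped stack during resume. A tiny standalone illustration of the sign test (the sample addresses are made up):

#include <stdio.h>

static int looks_like_kernel_addr(unsigned long addr)
{
	return (long)addr < 0;   /* top bit set means upper half of the address space */
}

int main(void)
{
	unsigned long kernel_stack = 0xffffc90000123456UL; /* typical kernel VA */
	unsigned long early_stack  = 0x0000000000009f00UL; /* low, physical-looking */

	printf("%d %d\n", looks_like_kernel_addr(kernel_stack),
	       looks_like_kernel_addr(early_stack));        /* prints: 1 0 */
	return 0;
}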
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index a95e1e5..4256d9b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -229,20 +229,22 @@
 static int fw_lookup_and_allocate_buf(const char *fw_name,
 				      struct firmware_cache *fwc,
 				      struct firmware_buf **buf, void *dbuf,
-				      size_t size)
+				      size_t size, unsigned int opt_flags)
 {
 	struct firmware_buf *tmp;
 
 	spin_lock(&fwc->lock);
-	tmp = __fw_lookup_buf(fw_name);
-	if (tmp) {
-		kref_get(&tmp->ref);
-		spin_unlock(&fwc->lock);
-		*buf = tmp;
-		return 1;
+	if (!(opt_flags & FW_OPT_NOCACHE)) {
+		tmp = __fw_lookup_buf(fw_name);
+		if (tmp) {
+			kref_get(&tmp->ref);
+			spin_unlock(&fwc->lock);
+			*buf = tmp;
+			return 1;
+		}
 	}
 	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
-	if (tmp)
+	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
 		list_add(&tmp->list, &fwc->head);
 	spin_unlock(&fwc->lock);
 
@@ -1051,7 +1053,8 @@
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-			  struct device *device, void *dbuf, size_t size)
+			  struct device *device, void *dbuf, size_t size,
+			  unsigned int opt_flags)
 {
 	struct firmware *firmware;
 	struct firmware_buf *buf;
@@ -1069,7 +1072,8 @@
 		return 0; /* assigned */
 	}
 
-	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
+	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size,
+					opt_flags);
 
 	/*
 	 * bind with 'buf' now to avoid warning in failure path
@@ -1147,7 +1151,8 @@
 		goto out;
 	}
 
-	ret = _request_firmware_prepare(&fw, name, device, buf, size);
+	ret = _request_firmware_prepare(&fw, name, device, buf, size,
+					opt_flags);
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
@@ -1173,11 +1178,11 @@
 	ret = fw_get_filesystem_firmware(device, fw->priv);
 	if (ret) {
 		if (!(opt_flags & FW_OPT_NO_WARN))
-			dev_warn(device,
-				 "Direct firmware load for %s failed with error %d\n",
+			dev_dbg(device,
+				 "Firmware %s was not found in kernel paths. rc:%d\n",
 				 name, ret);
 		if (opt_flags & FW_OPT_USERHELPER) {
-			dev_warn(device, "Falling back to user helper\n");
+			dev_dbg(device, "Falling back to user helper\n");
 			ret = fw_load_from_user_helper(fw, name, device,
 						       opt_flags, timeout);
 		}
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index b18a172..ee847d9f 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -306,7 +306,6 @@
 		.subsys = "slpi",
 		.link.link_info.edge = "dsps",
 		.link.link_info.transport = "smem",
-		.vmid = VMID_SSC_Q6,
 	},
 	{
 		.name = "cdsprpc-smd",
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149c..d0ac2d5 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -489,8 +489,7 @@
 int tpm_get_timeouts(struct tpm_chip *chip)
 {
 	struct tpm_cmd_t tpm_cmd;
-	unsigned long new_timeout[4];
-	unsigned long old_timeout[4];
+	unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
 	struct duration_t *duration_cap;
 	ssize_t rc;
 
@@ -542,11 +541,15 @@
 	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
 		return -EINVAL;
 
-	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
-	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
-	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
-	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
-	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+	timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+	timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+	timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+	timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+	timeout_chip[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+	timeout_chip[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+	timeout_chip[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+	timeout_chip[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+	memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
 
 	/*
 	 * Provide ability for vendor overrides of timeout values in case
@@ -554,16 +557,24 @@
 	 */
 	if (chip->ops->update_timeouts != NULL)
 		chip->timeout_adjusted =
-			chip->ops->update_timeouts(chip, new_timeout);
+			chip->ops->update_timeouts(chip, timeout_eff);
 
 	if (!chip->timeout_adjusted) {
-		/* Don't overwrite default if value is 0 */
-		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
-			int i;
+		/* Restore default if chip reported 0 */
+		int i;
 
+		for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+			if (timeout_eff[i])
+				continue;
+
+			timeout_eff[i] = timeout_old[i];
+			chip->timeout_adjusted = true;
+		}
+
+		if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
 			/* timeouts in msec rather usec */
-			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
-				new_timeout[i] *= 1000;
+			for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+				timeout_eff[i] *= 1000;
 			chip->timeout_adjusted = true;
 		}
 	}
@@ -572,16 +583,16 @@
 	if (chip->timeout_adjusted) {
 		dev_info(&chip->dev,
 			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
-			 old_timeout[0], new_timeout[0],
-			 old_timeout[1], new_timeout[1],
-			 old_timeout[2], new_timeout[2],
-			 old_timeout[3], new_timeout[3]);
+			 timeout_chip[0], timeout_eff[0],
+			 timeout_chip[1], timeout_eff[1],
+			 timeout_chip[2], timeout_eff[2],
+			 timeout_chip[3], timeout_eff[3]);
 	}
 
-	chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
-	chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
-	chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
-	chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
+	chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+	chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+	chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+	chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
 
 duration:
 	tpm_cmd.header.in = tpm_getcap_header;
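With the rename above, the adjustment log now reports chip values against the effective ones, and the effective timeouts are built in two steps: any entry the chip reports as 0 falls back to the driver's previous default, and if the first entry is below 1000 the whole set is treated as milliseconds and scaled to microseconds. A minimal standalone sketch of that selection (all numbers are illustrative):

#include <stdio.h>

#define N 4

int main(void)
{
	unsigned long timeout_old[N]  = { 750000, 2000000, 750000, 750000 }; /* driver defaults, usec */
	unsigned long timeout_chip[N] = { 750000, 2000000, 0, 750000 };      /* as reported by the chip */
	unsigned long timeout_eff[N];
	int i, adjusted = 0;

	for (i = 0; i < N; i++) {
		timeout_eff[i] = timeout_chip[i];
		if (!timeout_eff[i]) {            /* chip reported 0: keep the default */
			timeout_eff[i] = timeout_old[i];
			adjusted = 1;
		}
	}

	if (timeout_eff[0] && timeout_eff[0] < 1000) {
		for (i = 0; i < N; i++)           /* msec-looking values: scale to usec */
			timeout_eff[i] *= 1000;
		adjusted = 1;
	}

	for (i = 0; i < N; i++)
		printf("%lu ", timeout_eff[i]);
	printf("adjusted=%d\n", adjusted);        /* 750000 2000000 750000 750000 adjusted=1 */
	return 0;
}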
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b248b1b..1b545d6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1709,8 +1709,14 @@
 		}
 	}
 
+	/*
+	 * The Fabia PLLs only have 16 bits to program the fractional divider.
+	 * Hence the programmed rate might be slightly different than the
+	 * Hence the programmed rate might be slightly different from the
+	 * requested one.
 	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
-	    best_parent_rate != parent->rate)
+		(DIV_ROUND_CLOSEST(best_parent_rate, 1000) !=
+			DIV_ROUND_CLOSEST(parent->rate, 1000)))
 		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
@@ -2345,6 +2351,56 @@
 	NULL,
 };
 
+static void clk_state_subtree(struct clk_core *c)
+{
+	int vdd_level = 0;
+	struct clk_core *child;
+
+	if (!c)
+		return;
+
+	if (c->vdd_class) {
+		vdd_level = clk_find_vdd_level(c, c->rate);
+		if (vdd_level < 0)
+			vdd_level = 0;
+	}
+
+	trace_clk_state(c->name, c->prepare_count, c->enable_count,
+						c->rate, vdd_level);
+
+	hlist_for_each_entry(child, &c->children, child_node)
+		clk_state_subtree(child);
+}
+
+static int clk_state_show(struct seq_file *s, void *data)
+{
+	struct clk_core *c;
+	struct hlist_head **lists = (struct hlist_head **)s->private;
+
+	clk_prepare_lock();
+
+	for (; *lists; lists++)
+		hlist_for_each_entry(c, *lists, child_node)
+			clk_state_subtree(c);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+
+
+static int clk_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_state_show, inode->i_private);
+}
+
+static const struct file_operations clk_state_fops = {
+	.open		= clk_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 				 int level)
 {
@@ -2980,6 +3036,11 @@
 	if (!d)
 		return -ENOMEM;
 
+	d = debugfs_create_file("trace_clocks", 0444, rootdir, &all_lists,
+				&clk_state_fops);
+	if (!d)
+		return -ENOMEM;
+
 	mutex_lock(&clk_debug_lock);
 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
 		clk_debug_create_one(core, rootdir);
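Two separate things change in clk.c above: a trace_clocks debugfs file whose read walks the clock tree and emits one trace event per clock (prepare/enable counts, rate, vdd level), and a relaxed parent-rate comparison in clk_calc_new_rates(). The latter rounds both rates to the nearest kHz before comparing, so a fractional PLL (the comment cites the 16-bit Fabia fractional divider) that lands within a kHz of the request does not trigger another round of parent rate propagation. A standalone sketch of that comparison, using a simplified DIV_ROUND_CLOSEST for non-negative values:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long best_parent_rate = 296735905UL; /* what the PLL can actually do */
	unsigned long parent_rate      = 296736000UL; /* what was asked for */

	if (DIV_ROUND_CLOSEST(best_parent_rate, 1000) ==
	    DIV_ROUND_CLOSEST(parent_rate, 1000))
		printf("within a kHz: keep the current parent rate\n");
	else
		printf("re-propagate the rate to the parent\n");
	return 0;
}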
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 6ff621d..d15d1bb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -307,6 +307,15 @@
 	u64 quotient;
 	int alpha_bw = ALPHA_BITWIDTH;
 
+	/*
+	 * The PLL's parent rate is probably zero because the parent hasn't
+	 * been registered yet. Return early with the requested rate.
+	 */
+	if (!prate) {
+		pr_debug("PLL's parent rate hasn't been initialized.\n");
+		return rate;
+	}
+
 	quotient = rate;
 	remainder = do_div(quotient, prate);
 	*l = quotient;
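The early return above avoids a divide-by-zero in the L/alpha split when the parent clock has not registered yet and prate is still 0. As a rough standalone sketch of what the split computes once prate is known, rate = prate * (l + alpha / 2^alpha_bw); the 16-bit alpha width and the 19.2 MHz reference below are assumptions chosen for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long rate  = 1800000000ULL;  /* requested rate */
	unsigned long long prate = 19200000ULL;    /* parent rate, must be non-zero */
	unsigned int alpha_bw = 16;
	unsigned long long l, rem, alpha;

	if (!prate)                                /* mirrors the early return */
		return 0;

	l = rate / prate;                          /* integer multiplier */
	rem = rate % prate;
	alpha = (rem << alpha_bw) / prate;         /* fractional part, alpha_bw bits */

	printf("l=%llu alpha=0x%llx\n", l, alpha); /* l=93 alpha=0xc000 */
	return 0;
}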
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 51a5e0b..5c4ddcc 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -343,6 +343,72 @@
 };
 EXPORT_SYMBOL_GPL(clk_branch2_ops);
 
+static int clk_branch2_hw_ctl_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	if (!(hw->init->flags & CLK_SET_RATE_PARENT)) {
+		pr_err("SET_RATE_PARENT flag needs to be set for %s\n",
+					clk_hw_get_name(hw));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+static int clk_branch2_hw_ctl_determine_rate(struct clk_hw *hw,
+		struct clk_rate_request *req)
+{
+	struct clk_hw *clkp;
+
+	clkp = clk_hw_get_parent(hw);
+	if (!clkp)
+		return -EINVAL;
+
+	req->best_parent_hw = clkp;
+	req->best_parent_rate = clk_round_rate(clkp->clk, req->rate);
+
+	return 0;
+}
+
+static int clk_branch2_hw_ctl_enable(struct clk_hw *hw)
+{
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+
+	/* The parent branch clock should have been prepared prior to this. */
+	if (!parent || (parent && !clk_hw_is_prepared(parent)))
+		return -EINVAL;
+
+	return clk_enable_regmap(hw);
+}
+
+static void clk_branch2_hw_ctl_disable(struct clk_hw *hw)
+{
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+
+	if (!parent)
+		return;
+
+	clk_disable_regmap(hw);
+}
+
+const struct clk_ops clk_branch2_hw_ctl_ops = {
+	.enable = clk_branch2_hw_ctl_enable,
+	.disable = clk_branch2_hw_ctl_disable,
+	.is_enabled = clk_is_enabled_regmap,
+	.set_rate = clk_branch2_hw_ctl_set_rate,
+	.recalc_rate = clk_branch2_hw_ctl_recalc_rate,
+	.determine_rate = clk_branch2_hw_ctl_determine_rate,
+	.set_flags = clk_branch_set_flags,
+	.list_registers = clk_branch2_list_registers,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_hw_ctl_ops);
+
 static int clk_gate_toggle(struct clk_hw *hw, bool en)
 {
 	struct clk_gate2 *gt = to_clk_gate2(hw);
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 51209ea..f0fb6d5 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -64,6 +64,7 @@
 
 extern const struct clk_ops clk_branch_ops;
 extern const struct clk_ops clk_branch2_ops;
+extern const struct clk_ops clk_branch2_hw_ctl_ops;
 extern const struct clk_ops clk_gate2_ops;
 extern const struct clk_ops clk_branch_simple_ops;
 
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 035d337..61067ca 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -40,6 +40,7 @@
 #include "clk-regmap.h"
 #include "clk-rcg.h"
 #include "clk-voter.h"
+#include "clk-debug.h"
 
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
@@ -372,6 +373,7 @@
 	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_ops clk_ops_core;
@@ -507,6 +509,7 @@
 	.list_rate = clk_osm_list_rate,
 	.recalc_rate = l3_clk_recalc_rate,
 	.set_rate = l3_clk_set_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_init_data osm_clks_init[] = {
@@ -2251,7 +2254,7 @@
 	u32 pte_efuse, val;
 	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
 	struct clk *ext_xo_clk, *clk;
-	struct clk_osm *c;
+	struct clk_osm *c, *parent;
 	struct device *dev = &pdev->dev;
 	struct clk_onecell_data *clk_data;
 	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
@@ -2527,25 +2530,7 @@
 
 	get_online_cpus();
 
-	/* Enable OSM */
-	for_each_online_cpu(cpu) {
-		c = logical_cpu_to_clk(cpu);
-		if (!c) {
-			pr_err("no clock device for CPU=%d\n", cpu);
-			return -EINVAL;
-		}
-
-		rc = clk_set_rate(c->hw.clk, init_rate);
-		if (rc) {
-			dev_err(&pdev->dev, "Unable to set init rate on CPU %d, rc=%d\n",
-			cpu, rc);
-			goto provider_err;
-		}
-		WARN(clk_prepare_enable(c->hw.clk),
-		     "Failed to enable clock for cpu %d\n", cpu);
-		udelay(300);
-	}
-
+	/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
 	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
@@ -2556,6 +2541,43 @@
 		     "Failed to enable clock for L3\n");
 	udelay(300);
 
+	/* Set CPU clocks to run off GPLL0 and enable OSM for both domains */
+	for_each_online_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return -EINVAL;
+		}
+
+		parent = to_clk_osm(clk_hw_get_parent(&c->hw));
+		if (!parent->per_core_dcvs) {
+			if (cpu >= 0 && cpu <= 3)
+				c = logical_cpu_to_clk(0);
+			else if (cpu >= 4 && cpu <= 7)
+				c = logical_cpu_to_clk(4);
+			if (!c)
+				return -EINVAL;
+		}
+
+		rc = clk_set_rate(c->hw.clk, init_rate);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set init rate on %s, rc=%d\n",
+					clk_hw_get_name(&parent->hw), rc);
+			goto provider_err;
+		}
+		WARN(clk_prepare_enable(c->hw.clk),
+					"Failed to enable OSM for %s\n",
+					clk_hw_get_name(&parent->hw));
+		udelay(300);
+	}
+
+	/*
+	 * Add always-on votes for the CPU cluster clocks since we do not want
+	 * to re-enable OSM at any point.
+	 */
+	clk_prepare_enable(pwrcl_clk.hw.clk);
+	clk_prepare_enable(perfcl_clk.hw.clk);
+
 	populate_opp_table(pdev);
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
index 53288f7..fcc2493 100644
--- a/drivers/clk/qcom/clk-debug.c
+++ b/drivers/clk/qcom/clk-debug.c
@@ -133,12 +133,16 @@
 {
 	struct clk_debug_mux *meas = to_clk_measure(hw);
 	int i, num_parents = clk_hw_get_num_parents(hw);
+	struct clk_hw *hw_clk = clk_hw_get_parent(hw);
+
+	if (!hw_clk)
+		return 0;
 
 	for (i = 0; i < num_parents; i++) {
 		if (!strcmp(meas->parent[i].parents,
-					hw->init->parent_names[i])) {
-			pr_debug("%s: Clock name %s index %d\n", __func__,
-					hw->init->name, i);
+					clk_hw_get_name(hw_clk))) {
+			pr_debug("%s: clock parent - %s, index %d\n", __func__,
+					meas->parent[i].parents, i);
 			return i;
 		}
 	}
@@ -158,8 +162,8 @@
 		/* Update the recursive debug mux */
 		regmap_read(meas->regmap[dbg_cc],
 				meas->parent[index].mux_offset, &regval);
-		regval &= ~meas->parent[index].mux_sel_mask <<
-				meas->parent[index].mux_sel_shift;
+		regval &= ~(meas->parent[index].mux_sel_mask <<
+				meas->parent[index].mux_sel_shift);
 		regval |= (meas->parent[index].dbg_cc_mux_sel &
 				meas->parent[index].mux_sel_mask) <<
 				meas->parent[index].mux_sel_shift;
@@ -168,31 +172,34 @@
 
 		regmap_read(meas->regmap[dbg_cc],
 				meas->parent[index].post_div_offset, &regval);
-		regval &= ~meas->parent[index].post_div_mask <<
-				meas->parent[index].post_div_shift;
+		regval &= ~(meas->parent[index].post_div_mask <<
+				meas->parent[index].post_div_shift);
 		regval |= ((meas->parent[index].post_div_val - 1) &
 				meas->parent[index].post_div_mask) <<
 				meas->parent[index].post_div_shift;
 		regmap_write(meas->regmap[dbg_cc],
 				meas->parent[index].post_div_offset, regval);
 
-		regmap_read(meas->regmap[dbg_cc],
+		/* Not all recursive muxes have a DEBUG clock. */
+		if (meas->parent[index].cbcr_offset != U32_MAX) {
+			regmap_read(meas->regmap[dbg_cc],
 				meas->parent[index].cbcr_offset, &regval);
-		regval |= BIT(0);
-		regmap_write(meas->regmap[dbg_cc],
+			regval |= BIT(0);
+			regmap_write(meas->regmap[dbg_cc],
 				meas->parent[index].cbcr_offset, regval);
+		}
 	}
 
 	/* Update the debug sel for GCC */
 	regmap_read(meas->regmap[GCC], meas->debug_offset, &regval);
-	regval &= ~meas->src_sel_mask << meas->src_sel_shift;
+	regval &= ~(meas->src_sel_mask << meas->src_sel_shift);
 	regval |= (meas->parent[index].prim_mux_sel & meas->src_sel_mask) <<
 			meas->src_sel_shift;
 	regmap_write(meas->regmap[GCC], meas->debug_offset, regval);
 
 	/* Set the GCC mux's post divider bits */
 	regmap_read(meas->regmap[GCC], meas->post_div_offset, &regval);
-	regval &= ~meas->post_div_mask << meas->post_div_shift;
+	regval &= ~(meas->post_div_mask << meas->post_div_shift);
 	regval |= ((meas->parent[index].prim_mux_div_val - 1) &
 			meas->post_div_mask) << meas->post_div_shift;
 	regmap_write(meas->regmap[GCC], meas->post_div_offset, regval);
@@ -234,6 +241,10 @@
 		if (meas->parent[index].dbg_cc != GCC)
 			*val *= meas->parent[index].post_div_val;
 		*val *= meas->parent[index].prim_mux_div_val;
+
+		/* Account for any pre-set dividers */
+		if (meas->parent[index].misc_div_val)
+			*val *= meas->parent[index].misc_div_val;
 	}
 
 	meas_rate = clk_get_rate(hw->clk);
@@ -244,7 +255,6 @@
 	sw_rate = clk_get_rate(par->clk);
 	if (sw_rate && meas_rate >= (sw_rate * 2))
 		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
-
 	mutex_unlock(&clk_debug_lock);
 
 	return ret;
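The added parentheses in this file are a real fix, not style: `~` binds tighter than `<<`, so `~mask << shift` complements the mask first and then shifts, clearing every bit below the field as well, while `~(mask << shift)` clears only the intended field. A two-line standalone demonstration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int regval = 0xffffffffu;
	unsigned int mask = 0x3, shift = 4;            /* a 2-bit field at bit 4 */

	printf("0x%08x\n", regval & (~mask << shift)); /* 0xffffffc0: bits 0-5 wiped */
	printf("0x%08x\n", regval & ~(mask << shift)); /* 0xffffffcf: only bits 4-5 cleared */
	return 0;
}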
diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h
index 280704e..aa8d97b 100644
--- a/drivers/clk/qcom/clk-debug.h
+++ b/drivers/clk/qcom/clk-debug.h
@@ -66,6 +66,7 @@
  * @mux_offset:		the debug mux offset.
  * @post_div_offset:	register with post-divider settings for the debug mux.
  * @cbcr_offset:	branch register to turn on debug mux.
+ * @misc_div_val:	includes any pre-set dividers in the measurement logic.
  */
 struct clk_src {
 	const char *parents;
@@ -81,6 +82,7 @@
 	u32 mux_offset;
 	u32 post_div_offset;
 	u32 cbcr_offset;
+	u32 misc_div_val;
 };
 
 #define MUX_SRC_LIST(...) \
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 2f9cfdf..3d101ac 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1064,17 +1064,10 @@
 }
 
 static int clk_dp_determine_rate(struct clk_hw *hw,
-				struct clk_rate_request *req)
+		struct clk_rate_request *req)
 {
-	if (!hw)
-		return -EINVAL;
-
-	if (!clk_hw_get_parent(hw)) {
-		pr_err("Missing the parent for the DP RCG\n");
-		return -EINVAL;
-	}
-
-	req->best_parent_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+	req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
+							req->best_parent_rate);
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d30675c..44c5b81 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -272,6 +272,9 @@
 	"video_cc_venus_ahb_clk",
 	"video_cc_venus_ctl_axi_clk",
 	"video_cc_venus_ctl_core_clk",
+	"l3_clk",
+	"pwrcl_clk",
+	"perfcl_clk",
 };
 
 static struct clk_debug_mux gcc_debug_mux = {
@@ -766,6 +769,12 @@
 			0x4, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
 		{ "video_cc_venus_ctl_core_clk", 0x48, 4, VIDEO_CC,
 			0x1, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "l3_clk", 0xD6, 4, CPU,
+			0x46, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "pwrcl_clk", 0xD6, 4, CPU,
+			0x44, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "perfcl_clk", 0xD6, 4, CPU,
+			0x45, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
 	),
 	.hw.init = &(struct clk_init_data){
 		.name = "gcc_debug_mux",
@@ -862,6 +871,16 @@
 		}
 	}
 
+	if (of_get_property(pdev->dev.of_node, "qcom,cpucc", NULL)) {
+		gcc_debug_mux.regmap[CPU] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,cpucc");
+		if (IS_ERR(gcc_debug_mux.regmap[CPU])) {
+			pr_err("Failed to map qcom,cpucc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[CPU]);
+		}
+	}
+
 	clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 3b56fa1..d3a28e6 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -80,8 +80,8 @@
 
 static const char * const disp_cc_parent_names_1[] = {
 	"bi_tcxo",
-	"dp_phy_pll_link_clk",
-	"dp_phy_pll_vco_div_clk",
+	"dp_link_clk_divsel_ten",
+	"dp_vco_divided_clk_src_mux",
 	"core_bi_pll_test_se",
 };
 
@@ -217,12 +217,11 @@
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto_clk_src[] = {
-	F( 108000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 180000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 360000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 540000000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 108000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 180000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 360000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
 	{ }
 };
 
@@ -236,23 +235,22 @@
 		.name = "disp_cc_mdss_dp_crypto_clk_src",
 		.parent_names = disp_cc_parent_names_1,
 		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 12800000,
-			LOWER, 108000000,
-			LOW, 180000000,
-			LOW_L1, 360000000,
-			NOMINAL, 540000000),
+			MIN, 12800,
+			LOWER, 108000,
+			LOW, 180000,
+			LOW_L1, 360000,
+			NOMINAL, 540000),
 	},
 };
 
-/* Need to get the exact frequencies that are supported */
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
-	F_SLEW( 162000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  324000000),
-	F_SLEW( 270000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0,  540000000),
-	F_SLEW( 540000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1080000000),
-	F_SLEW( 810000000, P_DP_PHY_PLL_LINK_CLK,   2,   0,   0, 1620000000),
+	F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 540000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
+	F( 810000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
 	{ }
 };
 
@@ -269,11 +267,11 @@
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_rcg2_ops,
 		VDD_CX_FMAX_MAP5(
-			MIN, 19200000,
-			LOWER, 162000000,
-			LOW, 270000000,
-			LOW_L1, 540000000,
-			NOMINAL, 810000000),
+			MIN, 19200,
+			LOWER, 162000,
+			LOW, 270000,
+			LOW_L1, 540000,
+			NOMINAL, 810000),
 	},
 };
 
@@ -284,17 +282,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel1_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -305,17 +301,15 @@
 	.parent_map = disp_cc_parent_map_1,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_mdss_dp_pixel_clk_src",
-		.parent_names = (const char *[]){
-			"dp_phy_pll_vco_div_clk",
-		},
-		.num_parents = 1,
+		.parent_names = disp_cc_parent_names_1,
+		.num_parents = 4,
 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
 		.ops = &clk_dp_ops,
 		VDD_CX_FMAX_MAP4(
-			MIN, 19200000,
-			LOWER, 202500000,
-			LOW, 296735905,
-			LOW_L1, 675000000),
+			MIN, 19200,
+			LOWER, 202500,
+			LOW, 296735,
+			LOW_L1, 675000),
 	},
 };
 
@@ -664,23 +658,7 @@
 	},
 };
 
-static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
-	.reg = 0x2150,
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_link_div_clk_src",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
-	},
-};
-
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
 static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
 	.halt_reg = 0x2044,
 	.halt_check = BRANCH_HALT,
@@ -690,10 +668,10 @@
 		.hw.init = &(struct clk_init_data){
 			.name = "disp_cc_mdss_dp_link_intf_clk",
 			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_link_div_clk_src",
+				"disp_cc_mdss_dp_link_clk_src",
 			},
 			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+			.flags = CLK_GET_RATE_NOCACHE,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -955,8 +933,6 @@
 					&disp_cc_mdss_dp_crypto_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
 	[DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
-	[DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
-					&disp_cc_mdss_dp_link_div_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
 	[DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 678dd10..29421a1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -158,6 +158,12 @@
 	"core_bi_pll_test_se",
 };
 
+static const char * const gcc_parent_names_8[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
 static struct clk_dummy measure_only_snoc_clk = {
 	.rrate = 1000,
 	.hw.init = &(struct clk_init_data){
@@ -298,11 +304,11 @@
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_rbcpr_clk_src",
-		.parent_names = gcc_parent_names_3,
+		.parent_names = gcc_parent_names_8,
 		.num_parents = 3,
 		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP1(
+		VDD_CX_FMAX_MAP1_AO(
 			MIN, 19200000),
 	},
 };
@@ -889,7 +895,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_gp1_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_axi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -917,7 +923,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_ice_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -937,6 +943,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_4,
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_4,
@@ -961,7 +968,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_unipro_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -990,7 +997,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_axi_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1011,7 +1018,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
-	.enable_safe_config = true,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_ice_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1031,6 +1038,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_4,
 	.freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_phy_aux_clk_src",
 		.parent_names = gcc_parent_names_4,
@@ -1048,6 +1056,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_phy_unipro_core_clk_src",
 		.parent_names = gcc_parent_names_0,
@@ -1225,6 +1234,23 @@
 	},
 };
 
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+	.halt_reg = 0x82028,
+	.clkr = {
+		.enable_reg = 0x82028,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_aggre_ufs_card_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
 	.halt_reg = 0x82024,
 	.halt_check = BRANCH_HALT,
@@ -1243,6 +1269,23 @@
 	},
 };
 
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x82024,
+	.clkr = {
+		.enable_reg = 0x82024,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_aggre_ufs_phy_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
 	.halt_reg = 0x8201c,
 	.halt_check = BRANCH_HALT,
@@ -2575,6 +2618,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+	.halt_reg = 0x7500c,
+	.clkr = {
+		.enable_reg = 0x7500c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_card_clkref_clk = {
 	.halt_reg = 0x8c004,
 	.halt_check = BRANCH_HALT,
@@ -2606,6 +2666,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+	.halt_reg = 0x75058,
+	.clkr = {
+		.enable_reg = 0x75058,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_ice_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_card_phy_aux_clk = {
 	.halt_reg = 0x7508c,
 	.halt_check = BRANCH_HALT,
@@ -2624,6 +2701,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+	.halt_reg = 0x7508c,
+	.clkr = {
+		.enable_reg = 0x7508c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_phy_aux_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_gate2 gcc_ufs_card_rx_symbol_0_clk = {
 	.udelay = 500,
 	.clkr = {
@@ -2678,6 +2772,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+	.halt_reg = 0x75054,
+	.clkr = {
+		.enable_reg = 0x75054,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_card_unipro_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_mem_clkref_clk = {
 	.halt_reg = 0x8c000,
 	.halt_check = BRANCH_HALT,
@@ -2722,6 +2833,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+	.halt_reg = 0x7700c,
+	.clkr = {
+		.enable_reg = 0x7700c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_phy_ice_core_clk = {
 	.halt_reg = 0x77058,
 	.halt_check = BRANCH_HALT,
@@ -2740,6 +2868,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+	.halt_reg = 0x77058,
+	.clkr = {
+		.enable_reg = 0x77058,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
 	.halt_reg = 0x7708c,
 	.halt_check = BRANCH_HALT,
@@ -2758,6 +2903,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+	.halt_reg = 0x7708c,
+	.clkr = {
+		.enable_reg = 0x7708c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_gate2 gcc_ufs_phy_rx_symbol_0_clk = {
 	.udelay = 500,
 	.clkr = {
@@ -2812,6 +2974,23 @@
 	},
 };
 
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+	.halt_reg = 0x77054,
+	.clkr = {
+		.enable_reg = 0x77054,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_hw_ctl_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_usb30_prim_master_clk = {
 	.halt_reg = 0xf00c,
 	.halt_check = BRANCH_HALT,
@@ -3094,7 +3273,11 @@
 static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+	[GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] =
+				&gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
 	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+	[GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] =
+				&gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
 	[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
 	[GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
 	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
@@ -3212,30 +3395,43 @@
 	[GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
 	[GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
 	[GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+	[GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
 	[GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
 	[GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+	[GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] =
+				&gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
 	[GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+	[GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] =
+				&gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
 	[GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
 	[GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
 	[GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
 	[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+	[GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] =
+				&gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
 	[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
 					&gcc_ufs_card_unipro_core_clk_src.clkr,
 	[GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
 	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+	[GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
 	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
 	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+	[GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
 	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
 	[GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
 	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
 	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] =
+				&gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
 	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
 					&gcc_ufs_phy_unipro_core_clk_src.clkr,
 	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
@@ -3391,10 +3587,7 @@
 
 	/*
 	 * TODO:
-	 * 1. Support HW clock measurement
-	 * 2. Support UFS clock hw_ctrl
-	 * 3. Support mux clock interface for pcie pipe clocks
-	 * 4. QUPv3 support
+	 * 1. QUPv3 support
 	 */
 
 	dev_info(&pdev->dev, "Registered GCC clocks\n");
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 0115bb1..ae9d509 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -644,8 +644,6 @@
 		return ret;
 	}
 
-	clk_prepare_enable(gpu_cc_cxo_clk.clkr.hw.clk);
-
 	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
 
 	return ret;
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 3daefbc..93ad1b0 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -23,9 +23,10 @@
 
 #define VCO_DELAY_USEC 1
 
-#define MHZ_375		375000000UL
-#define MHZ_750		750000000UL
-#define MHZ_1500	1500000000UL
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
 #define MHZ_1900	1900000000UL
 #define MHZ_3000	3000000000UL
 
@@ -99,6 +100,7 @@
 	u32 frac_div_start_low;
 	u32 frac_div_start_mid;
 	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
 	u32 ssc_stepsize_low;
 	u32 ssc_stepsize_high;
 	u32 ssc_div_per_low;
@@ -209,20 +211,36 @@
 	u64 dec, dec_multiple;
 	u32 frac;
 	u64 multiplier;
+	u32 i;
 
 	target_freq = rsc->vco_current_rate;
 	pr_debug("target_freq = %llu\n", target_freq);
 
 	if (config->div_override) {
 		computed_output_div = config->output_div;
+
+		/*
+		 * Computed_output_div = 2 ^ div_log
+	 * To get div_log from the output div, just take the index of the
+	 * set bit in the value.
+	 * div_log ranges from 0-3, so check the 4 LSBs.
+		 */
+
+		for (i = 0; i < 4; i++) {
+			if (computed_output_div & (1 << i)) {
+				div_log = i;
+				break;
+			}
+		}
+
 	} else {
-		if (target_freq < MHZ_375) {
+		if (target_freq < MHZ_250) {
 			computed_output_div = 8;
 			div_log = 3;
-		} else if (target_freq < MHZ_750) {
+		} else if (target_freq < MHZ_500) {
 			computed_output_div = 4;
 			div_log = 2;
-		} else if (target_freq < MHZ_1500) {
+		} else if (target_freq < MHZ_1000) {
 			computed_output_div = 2;
 			div_log = 1;
 		} else {
@@ -251,6 +269,10 @@
 		regs->pll_prop_gain_rate = 10;
 	else
 		regs->pll_prop_gain_rate = 12;
+	if (pll_freq < MHZ_1100)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
 
 	regs->pll_outdiv_rate = div_log;
 	regs->pll_lockdet_rate = config->lock_timer;
@@ -375,7 +397,7 @@
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
 	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
 
 }
 
@@ -581,6 +603,9 @@
 		pr_err("dsi pll resources not available\n");
 		return;
 	}
+	pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+	pr_debug("cfg0=%d,cfg1=%d\n", pll->cached_cfg0, pll->cached_cfg1);
 
 	pll->vco_cached_rate = clk_hw_get_rate(hw);
 	dsi_pll_disable(vco);
@@ -615,6 +640,12 @@
 			mdss_pll_resource_enable(pll, false);
 			return rc;
 		}
+		pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
+			pll->cached_cfg1);
+		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
+					pll->cached_cfg0);
+		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
+					pll->cached_cfg1);
 	}
 
 	rc = dsi_pll_enable(vco);
@@ -1085,11 +1116,11 @@
 
 static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_vco_clk",
-			.parent_names = (const char *[]){"xo_board"},
+			.parent_names = (const char *[]){"bi_tcxo"},
 			.num_parents = 1,
 			.ops = &clk_ops_vco_10nm,
 			.flags = CLK_GET_RATE_NOCACHE,
@@ -1098,11 +1129,11 @@
 
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_vco_clk",
-			.parent_names = (const char *[]){"xo_board"},
+			.parent_names = (const char *[]){"bi_tcxo"},
 			.num_parents = 1,
 			.ops = &clk_ops_vco_10nm,
 			.flags = CLK_GET_RATE_NOCACHE,
@@ -1219,7 +1250,7 @@
 
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
@@ -1233,7 +1264,7 @@
 
 static struct clk_regmap_mux dsi1pll_byteclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
@@ -1247,7 +1278,7 @@
 
 static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_pclk_src_mux",
@@ -1262,7 +1293,7 @@
 
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_pclk_src_mux",
@@ -1307,7 +1338,7 @@
 
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
@@ -1321,7 +1352,7 @@
 
 static struct clk_regmap_mux dsi1pll_pclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
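In the first hunk of this PLL driver, the override path now derives div_log from the programmed output divider by locating the set bit among the four LSBs, which works because the divider is always a power of two (1, 2, 4 or 8); the rate-based path picks the same pairs from the new 250/500/1000 MHz thresholds. A standalone sketch of the derivation:

#include <stdio.h>

int main(void)
{
	unsigned int divs[] = { 1, 2, 4, 8 };
	unsigned int i, bit, div_log;

	for (i = 0; i < 4; i++) {
		div_log = 0;
		for (bit = 0; bit < 4; bit++) {    /* check the 4 LSBs */
			if (divs[i] & (1u << bit)) {
				div_log = bit;
				break;
			}
		}
		printf("output_div=%u -> div_log=%u\n", divs[i], div_log);
	}
	return 0;
}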
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index eccfcea..ee91e11 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -94,6 +94,8 @@
 	 * suspend/resume scenario. Cached the vco rate for such plls.
 	 */
 	unsigned long	vco_cached_rate;
+	u32		cached_cfg0;
+	u32		cached_cfg1;
 
 	/* dsi/edp/hmdi pll interface type */
 	u32		pll_interface_type;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 550a59c..5db1897 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -441,15 +441,14 @@
 {
 	u32 cntkctl = arch_timer_get_cntkctl();
 
-	/* Disable user access to the timers and the physical counter */
+	/* Disable user access to the timers */
 	/* Also disable virtual event stream */
 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
 			| ARCH_TIMER_USR_VT_ACCESS_EN
-			| ARCH_TIMER_VIRT_EVT_EN
-			| ARCH_TIMER_USR_PCT_ACCESS_EN);
+			| ARCH_TIMER_VIRT_EVT_EN);
 
-	/* Enable user access to the virtual counter */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	/* Enable user access to the virtual and physical counters */
+	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN | ARCH_TIMER_USR_PCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fd02eba..f61b78a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -385,58 +385,6 @@
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
-config CRYPTO_DEV_QCE50
-        bool
-
-config FIPS_ENABLE
-        bool "FIPS140-2 compliant build"
-        default n
-        help
-          This flag is used to make current build FIPS140-2
-          compliant. This flag will enable the patch of code
-          which will perform this task. Please select Y here
-          to enable.
-
-config CRYPTO_DEV_QCRYPTO
-        tristate "QTI Crypto accelerator"
-        select CRYPTO_DES
-        select CRYPTO_ALGAPI
-        select CRYPTO_AUTHENC
-        select CRYPTO_BLKCIPHER
-        default n
-        help
-          This driver supports QTI crypto acceleration
-          for kernel clients. To compile this driver as a module,
-          choose M here: the module will be called qcrypto. Please
-          select Y here to enable.
-
-config CRYPTO_DEV_QCOM_MSM_QCE
-        tristate "QTI Crypto Engine (QCE) module"
-        default n
-        help
-          This driver supports QTI Crypto Engine 5.0.
-          To compile this driver as a module, choose M here: the
-          module is called qce50.
-
-config CRYPTO_DEV_QCEDEV
-        tristate "QCEDEV Interface to CE module"
-        default n
-        help
-          This driver supports QTI QCEDEV Crypto Engine 5.0.
-          This exposes the interface to the QCE hardware accelerator
-          via IOCTLs.
-
-          To compile this driver as a module, choose M here: the
-          module will be called qcedev.
-
-config CRYPTO_DEV_OTA_CRYPTO
-        tristate "OTA Crypto module"
-        help
-          This driver supports QTI OTA Crypto in the FSM9xxx.
-          To compile this driver as a module, choose M here: the
-          module will be called ota_crypto. Please select Y here
-          to enable.
-
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64
@@ -550,7 +498,49 @@
 	help
 	  This driver supports Qualcomm crypto engine accelerator
 	  hardware. To compile this driver as a module, choose M here. The
-	  module will be called qcrypto.
+	  module will be called qcrypt.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+	tristate "QTI Crypto Engine (QCE) module"
+	depends on ARCH_QCOM
+	help
+	  This driver supports QTI Crypto Engine accelerator hardware, which
+	  is present on SDM845. This is the core crypto driver which adds
+	  CE5.0 functionalities. To compile this driver as a module, choose
+	  M here. The module will be called qce50.
+
+config CRYPTO_DEV_QCRYPTO
+	tristate "QTI Crypto accelerator"
+	depends on ARCH_QCOM
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  This driver supports QTI crypto acceleration
+	  for kernel clients. To compile this driver as a module,
+	  choose M here: the module will be called qcrypto. Please
+	  select Y here to enable.
+
+config CRYPTO_DEV_QCEDEV
+	tristate "QCEDEV Interface to CE module"
+	depends on ARCH_QCOM
+	help
+	  This driver supports QTI QCEDEV Crypto Engine 5.0.
+	  This exposes the interface to the QCE hardware accelerator
+	  via IOCTLs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+	tristate "OTA Crypto module"
+	depends on ARCH_QCOM
+	help
+	  This driver supports QTI OTA Crypto in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ota_crypto. Please select Y here
+	  to enable.
 
 config CRYPTO_DEV_VMX
 	bool "Support for VMX cryptographic acceleration instructions"
diff --git a/drivers/crypto/msm/Kconfig b/drivers/crypto/msm/Kconfig
index 0f4568b..3011aa6 100644
--- a/drivers/crypto/msm/Kconfig
+++ b/drivers/crypto/msm/Kconfig
@@ -2,7 +2,7 @@
 config CRYPTO_DEV_QCOM_ICE
 	tristate "Inline Crypto Module"
 	default n
-	depends on PFK && BLK_DEV_DM
+	depends on BLK_DEV_DM
 	help
 	  This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
 	  and later, to accelerate crypto operations for storage needs.
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index ba6825e..b411726 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -21,12 +21,31 @@
 #include <linux/cdev.h>
 #include <linux/regulator/consumer.h>
 #include <linux/msm-bus.h>
-#include <linux/pfk.h>
 #include <crypto/ice.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
 #include "iceregs.h"
 
+#ifdef CONFIG_PFK
+#include <linux/pfk.h>
+#else
+#include <linux/bio.h>
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return 0;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return 0;
+}
+
+static inline void pfk_clear_on_reset(void)
+{
+}
+#endif
+
 #define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
 	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
 
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 7b4ca24..2215dc1 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -124,6 +124,7 @@
 	bool use_sw_hmac_algo;
 	bool use_sw_aes_ccm_algo;
 	bool clk_mgmt_sus_res;
+	bool req_bw_before_clk;
 	unsigned int ce_device;
 	unsigned int ce_hw_instance;
 	unsigned int max_request;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 0cf4386..8af73ac 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -121,6 +121,7 @@
 	bool support_hw_key;
 	bool support_clk_mgmt_sus_res;
 	bool support_only_core_src_clk;
+	bool request_bw_before_clk;
 
 	void __iomem *iobase;	    /* Virtual io base of CE HW  */
 	unsigned int phy_iobase;    /* Physical io base of CE HW    */
@@ -298,7 +299,7 @@
 
 	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
 
-	dev_info(pce_dev->pdev, "CE device = 0x%x\n, IO base, CE = 0x%p\n, Consumer (IN) PIPE %d,    Producer (OUT) PIPE %d\n IO base BAM = 0x%p\n BAM IRQ %d\n Engines Availability = 0x%x\n",
+	dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
 			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
 			pce_dev->ce_bam_info.dest_pipe_index,
 			pce_dev->ce_bam_info.src_pipe_index,
@@ -5675,6 +5676,8 @@
 		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
 	pce_dev->support_only_core_src_clk = of_property_read_bool(
 		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+	pce_dev->request_bw_before_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");
 
 	if (of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,bam-pipe-pair",
@@ -5762,6 +5765,9 @@
 
 	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
 	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		if (pce_dev->request_bw_before_clk)
+			goto skip_set_rate;
+
 		rc = clk_set_rate(pce_dev->ce_core_src_clk,
 						pce_dev->ce_opp_freq_hz);
 		if (rc) {
@@ -5780,6 +5786,7 @@
 		pce_dev->ce_core_src_clk = NULL;
 	}
 
+skip_set_rate:
 	if (pce_dev->support_only_core_src_clk) {
 		pce_dev->ce_core_clk = NULL;
 		pce_dev->ce_clk = NULL;
@@ -6096,6 +6103,7 @@
 	ce_support->hw_key = pce_dev->support_hw_key;
 	ce_support->aes_ccm = true;
 	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
 	if (pce_dev->ce_bam_info.minor_version)
 		ce_support->aligned_only = false;
 	else
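
For reference, the new qcom,request-bw-before-clk devicetree flag is surfaced to QCE clients through the capability structure (ce_support) filled in the hunk above. A minimal sketch of how a client might consume it, assuming the bit means "vote for bus bandwidth before switching the CE clocks on"; both helpers below are hypothetical:

/*
 * Sketch only: helper functions are hypothetical and stand in for the
 * client's own bus-scaling and clock-enable paths.
 */
int example_vote_bus_bandwidth(void);
int example_enable_ce_clocks(void);

static int example_bring_up_ce(bool req_bw_before_clk)
{
	int rc;

	if (req_bw_before_clk) {
		/* bandwidth vote first, then clocks */
		rc = example_vote_bus_bandwidth();
		if (!rc)
			rc = example_enable_ce_clocks();
	} else {
		/* legacy order: clocks first */
		rc = example_enable_ce_clocks();
		if (!rc)
			rc = example_vote_bus_bandwidth();
	}

	return rc;
}
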
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 348dc31..7f584ee 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -1,5 +1,5 @@
-obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
-qcrypto-objs := core.o \
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypt.o
+qcrypt-objs := core.o \
 		common.o \
 		dma.o \
 		sha.o \
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index d70104d..078e198 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -54,9 +54,9 @@
 #define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
 #define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * zone)
 
-enum bwmon_type {
-	BWMON_1,
-	BWMON_2,
+enum mon_reg_type {
+	MON1,
+	MON2,
 };
 
 struct bwmon_spec {
@@ -91,25 +91,25 @@
 
 static DEFINE_SPINLOCK(glb_lock);
 
-static __always_inline void mon_enable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
 		break;
 	}
 }
 
-static __always_inline void mon_disable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(m->throttle_adj, MON2_EN(m));
 		break;
 	}
@@ -124,13 +124,13 @@
 #define MON_CLEAR_BIT	0x1
 #define MON_CLEAR_ALL_BIT	0x2
 static __always_inline
-void mon_clear(struct bwmon *m, bool clear_all, enum bwmon_type type)
+void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		if (clear_all)
 			writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
 		else
@@ -172,19 +172,19 @@
 }
 
 static __always_inline
-void mon_irq_enable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val |= INT_ENABLE_V1;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val |= INT_STATUS_MASK_HWS;
@@ -209,20 +209,20 @@
 }
 
 static __always_inline
-void mon_irq_disable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val &= ~INT_ENABLE_V1;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val &= ~INT_STATUS_MASK_HWS;
@@ -238,18 +238,18 @@
 }
 
 static __always_inline
-unsigned int mon_irq_status(struct bwmon *m, enum bwmon_type type)
+unsigned int mon_irq_status(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 mval;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
 		mval &= INT_STATUS_MASK;
 		break;
-	case BWMON_2:
+	case MON2:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
@@ -283,14 +283,14 @@
 }
 
 static __always_inline
-void mon_irq_clear(struct bwmon *m, enum bwmon_type type)
+void mon_irq_clear(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(INT_STATUS_MASK, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(INT_STATUS_MASK_HWS, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
@@ -419,7 +419,7 @@
 	unsigned long count, status;
 
 	count = readl_relaxed(MON_CNT(m));
-	status = mon_irq_status(m, BWMON_1);
+	status = mon_irq_status(m, MON1);
 
 	dev_dbg(m->dev, "Counter: %08lx\n", count);
 
@@ -469,15 +469,15 @@
 }
 
 static __always_inline
-unsigned long mon_get_count(struct bwmon *m, enum bwmon_type type)
+unsigned long mon_get_count(struct bwmon *m, enum mon_reg_type type)
 {
 	unsigned long count;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		count = mon_get_count1(m);
 		break;
-	case BWMON_2:
+	case MON2:
 		count = mon_get_zone_stats(m);
 		break;
 	}
@@ -499,7 +499,7 @@
 }
 
 static __always_inline
-unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum bwmon_type type)
+unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	unsigned long count;
@@ -515,12 +515,12 @@
 
 static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_1);
+	return __get_bytes_and_clear(hw, MON1);
 }
 
 static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_2);
+	return __get_bytes_and_clear(hw, MON2);
 }
 
 static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
@@ -529,10 +529,10 @@
 	u32 limit;
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_1);
+	mon_disable(m, MON1);
 	count = mon_get_count1(m);
-	mon_clear(m, false, BWMON_1);
-	mon_irq_clear(m, BWMON_1);
+	mon_clear(m, false, MON1);
+	mon_irq_clear(m, MON1);
 
 	if (likely(!m->spec->wrap_on_thres))
 		limit = bytes;
@@ -540,7 +540,7 @@
 		limit = max(bytes, 500000UL);
 
 	mon_set_limit(m, limit);
-	mon_enable(m, BWMON_1);
+	mon_enable(m, MON1);
 
 	return count;
 }
@@ -549,18 +549,18 @@
 {
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_2);
-	mon_clear(m, false, BWMON_2);
-	mon_irq_clear(m, BWMON_2);
+	mon_disable(m, MON2);
+	mon_clear(m, false, MON2);
+	mon_irq_clear(m, MON2);
 
 	mon_set_zones(m, sample_ms);
-	mon_enable(m, BWMON_2);
+	mon_enable(m, MON2);
 
 	return 0;
 }
 
 static irqreturn_t
-__bwmon_intr_handler(int irq, void *dev, enum bwmon_type type)
+__bwmon_intr_handler(int irq, void *dev, enum mon_reg_type type)
 {
 	struct bwmon *m = dev;
 
@@ -576,12 +576,12 @@
 
 static irqreturn_t bwmon_intr_handler(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_1);
+	return __bwmon_intr_handler(irq, dev, MON1);
 }
 
 static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_2);
+	return __bwmon_intr_handler(irq, dev, MON2);
 }
 
 static irqreturn_t bwmon_intr_thread(int irq, void *dev)
@@ -592,8 +592,8 @@
 	return IRQ_HANDLED;
 }
 
-static __always_inline int
-__start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps, enum bwmon_type type)
+static __always_inline int __start_bw_hwmon(struct bw_hwmon *hw,
+		unsigned long mbps, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	u32 limit, zone_actions;
@@ -601,11 +601,11 @@
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
 		break;
-	case BWMON_2:
+	case MON2:
 		zone_actions = calc_zone_actions();
 		handler = bwmon_intr_handler2;
 		break;
@@ -625,11 +625,11 @@
 	mon_clear(m, false, type);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		mon_set_limit(m, limit);
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_set_zones(m, hw->df->profile->polling_ms);
 		/* Set the zone actions to increment appropriate counters */
 		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
@@ -645,16 +645,16 @@
 
 static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_1);
+	return __start_bw_hwmon(hw, mbps, MON1);
 }
 
 static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_2);
+	return __start_bw_hwmon(hw, mbps, MON2);
 }
 
 static __always_inline
-void __stop_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -667,16 +667,16 @@
 
 static void stop_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_1);
+	return __stop_bw_hwmon(hw, MON1);
 }
 
 static void stop_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_2);
+	return __stop_bw_hwmon(hw, MON2);
 }
 
 static __always_inline
-int __suspend_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+int __suspend_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -690,25 +690,25 @@
 
 static int suspend_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_1);
+	return __suspend_bw_hwmon(hw, MON1);
 }
 
 static int suspend_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_2);
+	return __suspend_bw_hwmon(hw, MON2);
 }
 
-static int __resume_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+static int __resume_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	int ret;
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		break;
-	case BWMON_2:
+	case MON2:
 		handler = bwmon_intr_handler2;
 		break;
 	}
@@ -731,12 +731,12 @@
 
 static int resume_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_1);
+	return __resume_bw_hwmon(hw, MON1);
 }
 
 static int resume_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_2);
+	return __resume_bw_hwmon(hw, MON2);
 }
 
 /*************************************************************************/
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index d7cc425..53c0f8a 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -143,25 +143,27 @@
 {									\
 	struct devfreq *df = to_devfreq(dev);				\
 	struct hwmon_node *hw = df->data;				\
-	int ret;							\
+	int ret, numvals;						\
 	unsigned int i = 0, val;					\
+	char **strlist;							\
 									\
-	do {								\
-		ret = kstrtoint(buf, 10, &val);				\
+	strlist = argv_split(GFP_KERNEL, buf, &numvals);		\
+	if (!strlist)							\
+		return -ENOMEM;						\
+	numvals = min(numvals, n - 1);					\
+	for (i = 0; i < numvals; i++) {					\
+		ret = kstrtouint(strlist[i], 10, &val);			\
 		if (ret)						\
-			break;						\
-		buf = strnchr(buf, PAGE_SIZE, ' ');			\
-		if (buf)						\
-			buf++;						\
+			goto out;					\
 		val = max(val, _min);					\
 		val = min(val, _max);					\
 		hw->name[i] = val;					\
-		i++;							\
-	} while (buf && i < n - 1);					\
-	if (i < 1)							\
-		return -EINVAL;						\
+	}								\
+	ret = count;							\
+out:									\
+	argv_free(strlist);						\
 	hw->name[i] = 0;						\
-	return count;							\
+	return ret;							\
 }
 
 #define gov_list_attr(__attr, n, min, max)	\
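
The attribute-store macro above now tokenizes the sysfs buffer with argv_split()/kstrtouint() instead of hand-walking it with strnchr(). A standalone sketch of that parsing pattern, with hypothetical names, for reference:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Sketch only: parse up to max_vals unsigned integers from a
 * space-separated buffer, the same pattern the store macro uses.
 * Returns the number of values parsed, or a negative error code.
 */
static int example_parse_uint_list(const char *buf, unsigned int *vals,
				   int max_vals)
{
	char **tokens;
	int i, ntok, ret = 0;

	tokens = argv_split(GFP_KERNEL, buf, &ntok);
	if (!tokens)
		return -ENOMEM;

	ntok = min(ntok, max_vals);
	for (i = 0; i < ntok; i++) {
		ret = kstrtouint(tokens[i], 10, &vals[i]);
		if (ret)
			break;
	}

	argv_free(tokens);
	return ret ? ret : ntok;
}
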
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 9a71ea0..c7260f9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -259,15 +259,11 @@
 			       dsi_ctrl->cell_index, op_state);
 			rc = -EINVAL;
 		} else if (state->power_state == DSI_CTRL_POWER_VREG_ON) {
-			if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
-			    (state->controller_state == DSI_CTRL_ENGINE_ON)) {
-				pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+			if (state->vid_engine_state == DSI_CTRL_ENGINE_ON) {
+				pr_debug("[%d]State error: op=%d: %d\n",
 				       dsi_ctrl->cell_index,
 				       op_state,
-				       state->cmd_engine_state,
-				       state->vid_engine_state,
-				       state->controller_state);
+				       state->vid_engine_state);
 				rc = -EINVAL;
 			}
 		}
@@ -701,7 +697,7 @@
 	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
 	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
 		rc = PTR_ERR(bus->bus_scale_table);
-		pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+		pr_debug("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
 		bus->bus_scale_table = NULL;
 		return rc;
 	}
@@ -1260,7 +1256,7 @@
 
 	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
 	if (rc)
-		pr_err("failed to init axi bus client, rc = %d\n", rc);
+		pr_debug("failed to init axi bus client, rc = %d\n", rc);
 
 	item->ctrl = dsi_ctrl;
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 122a63d..48c2370 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -248,7 +248,7 @@
 		reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
 		reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
 		width_final = mode->dsc->pclk_per_line;
-		stride_final = width_final * (h_stride / mode->h_active);
+		stride_final = mode->dsc->bytes_per_pkt;
 
 		reg = 0x39 << 8;
 		/*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index ee39ec7..563285d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -408,6 +408,7 @@
 	u32 pixel_clk_khz;
 	enum dsi_op_mode panel_mode;
 	u32 dsi_mode_flags;
+	struct msm_mode_info *mode_info;
 };
 
 #endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 106511c..231f29b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1243,7 +1243,7 @@
 	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
 	if (IS_ERR_OR_NULL(mux->byte_clk)) {
 		rc = PTR_ERR(mux->byte_clk);
-		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
 		mux->byte_clk = NULL;
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1258,7 +1258,7 @@
 	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
 		rc = PTR_ERR(mux->pixel_clk);
 		mux->pixel_clk = NULL;
-		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
 		 * non-critical failure since these clocks are requied only for
@@ -1373,6 +1373,11 @@
 			if (rc)
 				pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
 					__func__, rc);
+
+			rc = dsi_display_phy_reset_config(display, false);
+			if (rc)
+				pr_err("%s: Failed to reset phy, rc=%d\n",
+						__func__, rc);
 		} else {
 			/* Make sure that controller is not in ULPS state when
 			 * the DSI link is not active.
@@ -1428,6 +1433,13 @@
 			}
 		}
 
+		rc = dsi_display_phy_reset_config(display, true);
+		if (rc) {
+			pr_err("%s: Failed to reset phy, rc=%d\n",
+						__func__, rc);
+			goto error;
+		}
+
 		rc = dsi_display_set_clamp(display, false);
 		if (rc) {
 			pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
@@ -1558,7 +1570,7 @@
 			display->lane_map.lane_map_v2[i] = BIT(temp[i]);
 		return 0;
 	} else if (rc != EINVAL) {
-		pr_warn("Incorrect mapping, configure default\n");
+		pr_debug("Incorrect mapping, configure default\n");
 		goto set_default;
 	}
 
@@ -2730,6 +2742,8 @@
 		break;
 	case DSI_OP_CMD_MODE:
 		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+		info->is_te_using_watchdog_timer =
+			display->panel->te_using_watchdog_timer;
 		break;
 	default:
 		pr_err("unknwown dsi panel mode %d\n",
@@ -2973,18 +2987,11 @@
 		goto error_phy_disable;
 	}
 
-	rc = dsi_display_phy_reset_config(display, true);
-	if (rc) {
-		pr_err("[%s] failed to setup DSI controller, rc=%d\n",
-		       display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
 	rc = dsi_display_set_clk_src(display);
 	if (rc) {
 		pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
 			display->name, rc);
-		goto error_phy_reset_off;
+		goto error_ctrl_deinit;
 	}
 
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
@@ -2992,7 +2999,7 @@
 	if (rc) {
 		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
 		       display->name, rc);
-		goto error_phy_reset_off;
+		goto error_ctrl_deinit;
 	}
 
 	rc = dsi_display_ctrl_host_enable(display);
@@ -3015,8 +3022,6 @@
 error_ctrl_link_off:
 	(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_LINK_CLK, DSI_CLK_OFF);
-error_phy_reset_off:
-	(void)dsi_display_phy_reset_config(display, false);
 error_ctrl_deinit:
 	(void)dsi_display_ctrl_deinit(display);
 error_phy_disable:
@@ -3110,6 +3115,11 @@
 		pr_err("[%s] panel post-enable failed, rc=%d\n",
 		       display->name, rc);
 
+	/* remove the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
@@ -3125,6 +3135,11 @@
 
 	mutex_lock(&display->display_lock);
 
+	/* enable the clk vote for CMD mode panels */
+	if (display->config.panel_mode == DSI_OP_CMD_MODE)
+		dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+
 	rc = dsi_panel_pre_disable(display->panel);
 	if (rc)
 		pr_err("[%s] panel pre-disable failed, rc=%d\n",
@@ -3233,11 +3248,6 @@
 		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
 		       display->name, rc);
 
-	rc = dsi_display_phy_reset_config(display, false);
-	if (rc)
-		pr_err("[%s] failed to disable DSI PHY reset config, rc=%d\n",
-		       display->name, rc);
-
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_OFF);
 	if (rc)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 556c0d8..3f4bb5a5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -50,6 +50,8 @@
 	dsi_mode->pixel_clk_khz = drm_mode->clock;
 	dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
 
+	dsi_mode->mode_info = (struct msm_mode_info *)drm_mode->private;
+
 	if (msm_is_mode_seamless(drm_mode))
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_SEAMLESS;
 	if (msm_is_mode_dynamic_fps(drm_mode))
@@ -81,6 +83,8 @@
 	drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
 	drm_mode->clock = dsi_mode->pixel_clk_khz;
 
+	drm_mode->private = (int *)dsi_mode->mode_info;
+
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)
 		drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DFPS)
@@ -255,6 +259,26 @@
 	return ret;
 }
 
+int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
+	struct msm_display_topology *topology,
+	u32 max_mixer_width)
+{
+	struct dsi_display_mode dsi_mode;
+
+	if (!drm_mode || !topology)
+		return -EINVAL;
+
+	convert_to_dsi_mode(drm_mode, &dsi_mode);
+
+	if (!dsi_mode.mode_info)
+		return -EINVAL;
+
+	memcpy(topology, &dsi_mode.mode_info->topology,
+			sizeof(struct msm_display_topology));
+
+	return 0;
+}
+
 static const struct drm_bridge_funcs dsi_bridge_ops = {
 	.attach       = dsi_bridge_attach,
 	.mode_fixup   = dsi_bridge_mode_fixup,
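
dsi_conn_get_topology() above recovers the msm_mode_info that panel parsing attached to the mode, carried through drm_mode->private by the conversion helpers. A hedged caller sketch; the function and the mixer-width value below are illustrative only, not part of this patch:

/*
 * Illustrative caller: log the per-mode topology.  The 2560 mixer-width
 * argument is a placeholder; a real caller would pass the value from its
 * hardware catalog.
 */
static void example_log_mode_topology(const struct drm_display_mode *mode)
{
	struct msm_display_topology topology;

	if (dsi_conn_get_topology(mode, &topology, 2560))
		return;

	pr_debug("mode %s: lm=%u enc=%u intf=%u\n", mode->name,
		 topology.num_lm, topology.num_enc, topology.num_intf);
}
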
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 4339a11..68520a8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -64,6 +64,17 @@
 		void *display);
 
 /**
+ * dsi_conn_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: Zero on success
+ */
+int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
+	struct msm_display_topology *topology,
+	u32 max_mixer_width);
+
+/**
  * dsi_conn_mode_valid - callback to determine if specified mode is valid
  * @connector: Pointer to drm connector structure
  * @mode: Pointer to drm mode structure
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 8250da3..174be9f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -37,7 +37,7 @@
 	readl_relaxed((dsi_hw)->disp_cc_base + (off))
 #define DSI_DISP_CC_W32(dsi_hw, off, val) \
 	do {\
-		pr_err("[DSI_%d][%s] - [0x%08x]\n", \
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
 			(dsi_hw)->index, #off, val); \
 		writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
 	} while (0)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index b814eb8..deb718c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -20,6 +20,19 @@
 #include "dsi_panel.h"
 #include "dsi_ctrl_hw.h"
 
+#define MAX_CMDLINE_PARAM_LEN 256
+static char display_config[MAX_CMDLINE_PARAM_LEN];
+
+/**
+ * Topology is currently defined by the following three values:
+ * 1. num of layer mixers
+ * 2. num of compression encoders
+ * 3. num of interfaces
+ */
+#define TOPOLOGY_SET_LEN 3
+#define INT_BASE_10 10
+#define MAX_TOPOLOGY 5
+
 #define DSI_PANEL_DEFAULT_LABEL  "Default dsi panel"
 
 #define DEFAULT_MDP_TRANSFER_TIME 14000
@@ -527,15 +540,42 @@
 }
 #endif
 
+static int dsi_panel_update_backlight(struct dsi_panel *panel,
+	u32 bl_lvl)
+{
+	int rc = 0;
+	struct mipi_dsi_device *dsi;
+
+	if (!panel || (bl_lvl > 0xffff)) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	dsi = &panel->mipi_device;
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = mipi_dsi_dcs_set_display_brightness(dsi, bl_lvl);
+	if (rc < 0)
+		pr_err("failed to update dcs backlight:%d\n", bl_lvl);
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
 int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
 {
 	int rc = 0;
 	struct dsi_backlight_config *bl = &panel->bl_config;
 
+	pr_debug("backlight type:%d lvl:%d\n", bl->type, bl_lvl);
 	switch (bl->type) {
 	case DSI_BACKLIGHT_WLED:
 		led_trigger_event(bl->wled, bl_lvl);
 		break;
+	case DSI_BACKLIGHT_DCS:
+		dsi_panel_update_backlight(panel, bl_lvl);
+		break;
 	default:
 		pr_err("Backlight type(%d) not supported\n", bl->type);
 		rc = -ENOTSUPP;
@@ -553,6 +593,8 @@
 	case DSI_BACKLIGHT_WLED:
 		rc = dsi_panel_led_bl_register(panel, bl);
 		break;
+	case DSI_BACKLIGHT_DCS:
+		break;
 	default:
 		pr_err("Backlight type(%d) not supported\n", bl->type);
 		rc = -ENOTSUPP;
@@ -572,6 +614,8 @@
 	case DSI_BACKLIGHT_WLED:
 		led_trigger_unregister_simple(bl->wled);
 		break;
+	case DSI_BACKLIGHT_DCS:
+		break;
 	default:
 		pr_err("Backlight type(%d) not supported\n", bl->type);
 		rc = -ENOTSUPP;
@@ -1371,7 +1415,7 @@
 
 	data = of_get_property(of_node, cmd_set_prop_map[type], &length);
 	if (!data) {
-		pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+		pr_debug("%s commands not defined\n", cmd_set_prop_map[type]);
 		rc = -ENOTSUPP;
 		goto error;
 	}
@@ -1436,7 +1480,7 @@
 		} else {
 			rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
 			if (rc)
-				pr_err("[%s] failed to parse set %d\n",
+				pr_debug("[%s] failed to parse set %d\n",
 					panel->name, i);
 		}
 	}
@@ -1512,14 +1556,17 @@
 	return rc;
 }
 
-static int dsi_panel_parse_features(struct dsi_panel *panel,
+static int dsi_panel_parse_misc_features(struct dsi_panel *panel,
 				     struct device_node *of_node)
 {
 	panel->ulps_enabled =
 		of_property_read_bool(of_node, "qcom,ulps-enabled");
 
-	pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+	if (panel->ulps_enabled)
+		pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
 
+	panel->te_using_watchdog_timer = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-te-using-wd");
 	return 0;
 }
 
@@ -1742,8 +1789,8 @@
 					      "qcom,platform-bklight-en-gpio",
 					      0);
 	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
-		pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
-		rc = -EINVAL;
+		pr_debug("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+		rc = 0;
 		goto error;
 	}
 
@@ -1912,25 +1959,18 @@
 	u32 data;
 	int rc = -EINVAL;
 	int intf_width;
-	struct device_node *dsc_np = NULL;
 
 	if (!panel->dsc_enabled)
 		return 0;
 
-	dsc_np = of_parse_phandle(of_node, "qcom,config-select", 0);
-	if (!dsc_np) {
-		pr_err("no dsc config found\n");
-		goto error;
-	}
-
-	rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-height", &data);
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-height", &data);
 	if (rc) {
 		pr_err("failed to parse qcom,mdss-dsc-slice-height\n");
 		goto error;
 	}
 	panel->dsc.slice_height = data;
 
-	rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-width", &data);
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-width", &data);
 	if (rc) {
 		pr_err("failed to parse qcom,mdss-dsc-slice-width\n");
 		goto error;
@@ -1946,14 +1986,15 @@
 	panel->dsc.pic_width = panel->mode.timing.h_active;
 	panel->dsc.pic_height = panel->mode.timing.v_active;
 
-	rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-per-pkt", &data);
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-per-pkt",
+			&data);
 	if (rc) {
 		pr_err("failed to parse qcom,mdss-dsc-slice-per-pkt\n");
 		goto error;
 	}
 	panel->dsc.slice_per_pkt = data;
 
-	rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-bit-per-component",
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-component",
 		&data);
 	if (rc) {
 		pr_err("failed to parse qcom,mdss-dsc-bit-per-component\n");
@@ -1961,14 +2002,15 @@
 	}
 	panel->dsc.bpc = data;
 
-	rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-bit-per-pixel", &data);
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-pixel",
+			&data);
 	if (rc) {
 		pr_err("failed to parse qcom,mdss-dsc-bit-per-pixel\n");
 		goto error;
 	}
 	panel->dsc.bpp = data;
 
-	panel->dsc.block_pred_enable = of_property_read_bool(dsc_np,
+	panel->dsc.block_pred_enable = of_property_read_bool(of_node,
 		"qcom,mdss-dsc-block-prediction-enable");
 
 	panel->dsc.full_frame_slices = DIV_ROUND_UP(intf_width,
@@ -2027,6 +2069,112 @@
 	return 0;
 }
 
+static int dsi_get_cmdline_top_override(void)
+{
+	char *str = display_config;
+	int top_index = -1;
+
+	/*
+	 * This module needs to be updated with the command line argument
+	 * parsing needed for other dsi parameters.
+	 */
+	if (strlcat(str, "\0", sizeof(str)) > sizeof(str))
+		return -EINVAL;
+
+	str = strnstr(display_config, "config", strlen(display_config));
+	if (!str)
+		return -EINVAL;
+
+	if (kstrtol(str + strlen("config"), INT_BASE_10,
+				(unsigned long *)&top_index))
+		return -EINVAL;
+
+	return top_index;
+}
+
+static int dsi_panel_parse_topology(struct dsi_panel *panel,
+		struct device_node *of_node)
+{
+	struct msm_display_topology *topology;
+	u32 top_count, top_sel, *array = NULL;
+	int i, len = 0;
+	int rc = -EINVAL;
+
+	len = of_property_count_u32_elems(of_node, "qcom,display-topology");
+	if (len <= 0 || len % TOPOLOGY_SET_LEN ||
+			len > (TOPOLOGY_SET_LEN * MAX_TOPOLOGY)) {
+		pr_err("invalid topology list for the panel, rc = %d\n", rc);
+		return rc;
+	}
+
+	top_count = len / TOPOLOGY_SET_LEN;
+
+	array = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,display-topology", array, len);
+	if (rc) {
+		pr_err("unable to read the display topologies, rc = %d\n", rc);
+		goto read_fail;
+	}
+
+	topology = kcalloc(top_count, sizeof(*topology), GFP_KERNEL);
+	if (!topology) {
+		rc = -ENOMEM;
+		goto read_fail;
+	}
+
+	for (i = 0; i < top_count; i++) {
+		struct msm_display_topology *top = &topology[i];
+
+		top->num_lm = array[i * TOPOLOGY_SET_LEN];
+		top->num_enc = array[i * TOPOLOGY_SET_LEN + 1];
+		top->num_intf = array[i * TOPOLOGY_SET_LEN + 2];
+	};
+
+	top_sel = dsi_get_cmdline_top_override();
+	if (top_sel >= 0 && top_sel < top_count) {
+		pr_info("overridden topology: lm: %d comp_enc:%d intf: %d\n",
+			topology[top_sel].num_lm,
+			topology[top_sel].num_enc,
+			topology[top_sel].num_intf);
+		goto parse_done;
+	}
+
+	rc = of_property_read_u32(of_node,
+			"qcom,default-topology-index", &top_sel);
+	if (rc) {
+		pr_err("no default topology selected, rc = %d\n", rc);
+		goto parse_fail;
+	}
+
+	if (top_sel >= top_count) {
+		rc = -EINVAL;
+		pr_err("default topology specified is not valid, rc = %d\n",
+			rc);
+		goto parse_fail;
+	}
+
+	pr_info("default topology: lm: %d comp_enc:%d intf: %d\n",
+		topology[top_sel].num_lm,
+		topology[top_sel].num_enc,
+		topology[top_sel].num_intf);
+
+parse_done:
+	panel->mode.mode_info = kzalloc(sizeof(struct msm_mode_info),
+			GFP_KERNEL);
+	memcpy(&panel->mode.mode_info->topology, &topology[top_sel],
+		sizeof(struct msm_display_topology));
+parse_fail:
+	kfree(topology);
+read_fail:
+	kfree(array);
+
+	return rc;
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node)
 {
@@ -2084,6 +2232,13 @@
 	panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
 				    DSI_V_TOTAL(&panel->mode.timing) *
 				    panel->mode.timing.refresh_rate) / 1000;
+
+	rc = dsi_panel_parse_topology(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse panel topology, rc=%d\n", rc);
+		goto error;
+	}
+
 	rc = dsi_panel_parse_host_config(panel, of_node);
 	if (rc) {
 		pr_err("failed to parse host configuration, rc=%d\n", rc);
@@ -2128,7 +2283,7 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
-	rc = dsi_panel_parse_features(panel, of_node);
+	rc = dsi_panel_parse_misc_features(panel, of_node);
 	if (rc)
 		pr_err("failed to parse panel features, rc=%d\n", rc);
 
@@ -2153,6 +2308,8 @@
 	for (i = 0; i < DSI_CMD_SET_MAX; i++)
 		dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
 
+	kfree(panel->mode.mode_info);
+
 	/* TODO:  more free */
 	kfree(panel);
 }
@@ -2611,3 +2768,6 @@
 	mutex_unlock(&panel->panel_lock);
 	return rc;
 }
+
+module_param_string(display_param, display_config, MAX_CMDLINE_PARAM_LEN, 0600);
+MODULE_PARM_DESC(display_param, "format: configx - x indexes the selected topology from the display topology list. Index 0 corresponds to the first topology in the list");
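
The qcom,display-topology list is read as consecutive {num_lm, num_enc, num_intf} triplets; qcom,default-topology-index selects the default entry, and a "configN" value passed through the display_param parameter overrides that choice at boot. A hypothetical parse result for a two-entry list, purely for illustration (the values are invented):

/*
 * Hypothetical values: index 0 would be used by default, while a boot or
 * module argument of the form display_param=config1 (the exact prefix
 * depends on which module this file is built into) would select index 1.
 */
static const struct msm_display_topology example_topologies[] = {
	{ .num_lm = 1, .num_enc = 0, .num_intf = 1 },	/* single LM, no DSC */
	{ .num_lm = 2, .num_enc = 2, .num_intf = 1 },	/* dual LM with DSC */
};
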
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 9f63089..de2b5b1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -184,6 +184,7 @@
 	u32 panel_jitter;
 	u32 panel_prefill_lines;
 	bool panel_initialized;
+	bool te_using_watchdog_timer;
 
 	bool dsc_enabled;
 	char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index ebfb40b8..a1a0e57 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -791,23 +791,12 @@
 		   bool skip_validation)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy || !config) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 
 	if (!skip_validation)
@@ -839,10 +828,6 @@
 error:
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
 	return rc;
 }
 
@@ -855,34 +840,17 @@
 int dsi_phy_disable(struct msm_dsi_phy *phy)
 {
 	int rc = 0;
-	struct dsi_clk_ctrl_info clk_info;
 
 	if (!phy) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
-	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
-	clk_info.clk_type = DSI_CORE_CLK;
-	clk_info.clk_state = DSI_CLK_ON;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc) {
-		pr_err("failed to enable DSI core clocks\n");
-		return rc;
-	}
-
 	mutex_lock(&phy->phy_lock);
 	dsi_phy_disable_hw(phy);
 	phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
 	mutex_unlock(&phy->phy_lock);
 
-	clk_info.clk_state = DSI_CLK_OFF;
-
-	rc = phy->clk_cb.dsi_clk_cb(phy->clk_cb.priv, clk_info);
-	if (rc)
-		pr_err("failed to disable DSI core clocks\n");
-
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 609c5ff..e2219aa 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -238,7 +238,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
@@ -296,7 +297,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4471d0b..d50a185 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,6 +164,8 @@
 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,
 	CONNECTOR_PROP_TOPOLOGY_CONTROL,
+	CONNECTOR_PROP_AUTOREFRESH,
+	CONNECTOR_PROP_LP,
 
 	/* total # of properties */
 	CONNECTOR_PROP_COUNT
@@ -353,6 +355,26 @@
 };
 
 /**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm:       number of layer mixers used
+ * @num_enc:      number of compression encoder blocks used
+ * @num_intf:     number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+	u32 num_lm;
+	u32 num_enc;
+	u32 num_intf;
+};
+
+/**
+ * struct msm_mode_info - defines all msm custom mode info
+ * @topology:     supported topology for the mode
+ */
+struct msm_mode_info {
+	struct msm_display_topology topology;
+};
+
+/**
  * struct msm_display_info - defines display properties
  * @intf_type:          DRM_MODE_CONNECTOR_ display type
  * @capabilities:       Bitmask of display flags
@@ -367,6 +389,8 @@
  * @max_height:         Max height of display. In case of hot pluggable display
  *                      this is max height supported by controller
  * @is_primary:         Set to true if display is primary display
+ * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
+ *				 used instead of panel TE in cmd mode panels
  * @frame_rate:		Display frame rate
  * @prefill_lines:	prefill lines based on porches.
  * @vtotal:		display vertical total
@@ -390,6 +414,7 @@
 	uint32_t max_height;
 
 	bool is_primary;
+	bool is_te_using_watchdog_timer;
 	uint32_t frame_rate;
 	uint32_t prefill_lines;
 	uint32_t vtotal;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8487f46..3c3f335 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -105,9 +105,13 @@
 
 static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 {
-	struct circ_buf *fifo = &rd->fifo;
+	struct circ_buf *fifo;
 	const char *ptr = buf;
 
+	if (!rd || !buf)
+		return;
+
+	fifo = &rd->fifo;
 	while (sz > 0) {
 		char *fptr = &fifo->buf[fifo->head];
 		int n;
@@ -136,11 +140,18 @@
 static ssize_t rd_read(struct file *file, char __user *buf,
 		size_t sz, loff_t *ppos)
 {
-	struct msm_rd_state *rd = file->private_data;
-	struct circ_buf *fifo = &rd->fifo;
-	const char *fptr = &fifo->buf[fifo->tail];
+	struct msm_rd_state *rd;
+	struct circ_buf *fifo;
+	const char *fptr;
 	int n = 0, ret = 0;
 
+	if (!file || !file->private_data || !buf || !ppos)
+		return -EINVAL;
+
+	rd = file->private_data;
+	fifo = &rd->fifo;
+	fptr = &fifo->buf[fifo->tail];
+
 	mutex_lock(&rd->read_lock);
 
 	ret = wait_event_interruptible(rd->fifo_event,
@@ -168,19 +179,34 @@
 
 static int rd_open(struct inode *inode, struct file *file)
 {
-	struct msm_rd_state *rd = inode->i_private;
-	struct drm_device *dev = rd->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
+	struct msm_rd_state *rd;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_gpu *gpu;
 	uint64_t val;
 	uint32_t gpu_id;
 	int ret = 0;
 
+	if (!file || !inode || !inode->i_private)
+		return -EINVAL;
+
+	rd = inode->i_private;
+	dev = rd->dev;
+
+	if (!dev || !dev->dev_private)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	gpu = priv->gpu;
+
 	mutex_lock(&dev->struct_mutex);
 
 	if (rd->open || !gpu) {
 		ret = -EBUSY;
 		goto out;
+	} else if (!gpu->funcs || !gpu->funcs->get_param) {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	file->private_data = rd;
@@ -201,7 +227,12 @@
 
 static int rd_release(struct inode *inode, struct file *file)
 {
-	struct msm_rd_state *rd = inode->i_private;
+	struct msm_rd_state *rd;
+
+	if (!inode || !inode->i_private)
+		return -EINVAL;
+
+	rd = inode->i_private;
 	rd->open = false;
 	return 0;
 }
@@ -217,9 +248,14 @@
 
 int msm_rd_debugfs_init(struct drm_minor *minor)
 {
-	struct msm_drm_private *priv = minor->dev->dev_private;
+	struct msm_drm_private *priv;
 	struct msm_rd_state *rd;
 
+	if (!minor || !minor->dev || !minor->dev->dev_private)
+		return -EINVAL;
+
+	priv = minor->dev->dev_private;
+
 	/* only create on first minor: */
 	if (priv->rd)
 		return 0;
@@ -265,8 +301,14 @@
 
 void msm_rd_debugfs_cleanup(struct drm_minor *minor)
 {
-	struct msm_drm_private *priv = minor->dev->dev_private;
-	struct msm_rd_state *rd = priv->rd;
+	struct msm_drm_private *priv;
+	struct msm_rd_state *rd;
+
+	if (!minor || !minor->dev || !minor->dev->dev_private)
+		return;
+
+	priv = minor->dev->dev_private;
+	rd = priv->rd;
 
 	if (!rd)
 		return;
@@ -315,13 +357,20 @@
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_gem_submit *submit)
 {
-	struct drm_device *dev = submit->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_rd_state *rd = priv->rd;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_rd_state *rd;
 	char msg[128];
 	int i, n;
 
-	if (!rd->open)
+	if (!submit || !submit->dev || !submit->dev->dev_private)
+		return;
+
+	dev = submit->dev;
+	priv = dev->dev_private;
+	rd = priv->rd;
+
+	if (!rd || !rd->open)
 		return;
 
 	/* writing into fifo is serialized by caller, and
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index e3f8261..6593b47 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt)	"sde-drm:[%s] " fmt, __func__
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include "msm_drv.h"
 
 #include "sde_kms.h"
@@ -21,6 +21,9 @@
 
 #define BL_NODE_NAME_SIZE 32
 
+/* Autorefresh will occur after FRAME_CNT frames. Large values are unlikely */
+#define AUTOREFRESH_MAX_FRAME_CNT 6
+
 #define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
 		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
 
@@ -28,18 +31,26 @@
 		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
 
 static const struct drm_prop_enum_list e_topology_name[] = {
-	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
+	{SDE_RM_TOPOLOGY_NONE,	"sde_none"},
 	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
+	{SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,	"sde_singlepipe_dsc"},
 	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSC,	"sde_dualpipe_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,	"sde_dualpipemerge"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,	"sde_dualpipemerge_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,	"sde_dualpipe_dscmerge"},
 	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
-	{SDE_RM_TOPOLOGY_DUALPIPEMERGE,	"sde_dualpipemerge"}
 };
 static const struct drm_prop_enum_list e_topology_control[] = {
 	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
 	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
 	{SDE_RM_TOPCTL_DSPP,		"dspp"},
-	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
-	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"}
+};
+static const struct drm_prop_enum_list e_power_mode[] = {
+	{SDE_MODE_DPMS_ON,	"ON"},
+	{SDE_MODE_DPMS_LP1,	"LP1"},
+	{SDE_MODE_DPMS_LP2,	"LP2"},
+	{SDE_MODE_DPMS_OFF,	"OFF"},
 };
 
 static int sde_backlight_device_update_status(struct backlight_device *bd)
@@ -291,6 +302,7 @@
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
+	mutex_destroy(&c_conn->lock);
 	sde_fence_deinit(&c_conn->retire_fence);
 	drm_connector_cleanup(connector);
 	kfree(c_conn);
@@ -538,6 +550,56 @@
 	return 0;
 }
 
+static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
+{
+	struct drm_connector *connector;
+	void *display;
+	int (*set_power)(struct drm_connector *, int, void *);
+	int mode, rc = 0;
+
+	if (!c_conn)
+		return -EINVAL;
+	connector = &c_conn->base;
+
+	mode = c_conn->lp_mode;
+	if (c_conn->dpms_mode != DRM_MODE_DPMS_ON)
+		mode = SDE_MODE_DPMS_OFF;
+	switch (c_conn->dpms_mode) {
+	case DRM_MODE_DPMS_ON:
+		mode = c_conn->lp_mode;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		mode = SDE_MODE_DPMS_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		mode = SDE_MODE_DPMS_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		mode = SDE_MODE_DPMS_OFF;
+		break;
+	default:
+		mode = c_conn->lp_mode;
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id,
+			c_conn->dpms_mode, c_conn->lp_mode, mode);
+
+	if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) {
+		display = c_conn->display;
+		set_power = c_conn->ops.set_power;
+
+		mutex_unlock(&c_conn->lock);
+		rc = set_power(connector, mode, display);
+		mutex_lock(&c_conn->lock);
+	}
+	c_conn->last_panel_power_mode = mode;
+
+	return rc;
+}
+
 static int sde_connector_atomic_set_property(struct drm_connector *connector,
 		struct drm_connector_state *state,
 		struct drm_property *property,
@@ -564,8 +626,8 @@
 
 	/* connector-specific property handling */
 	idx = msm_property_index(&c_conn->property_info, property);
-
-	if (idx == CONNECTOR_PROP_OUT_FB) {
+	switch (idx) {
+	case CONNECTOR_PROP_OUT_FB:
 		/* clear old fb, if present */
 		if (c_state->out_fb)
 			_sde_connector_destroy_fb(c_conn, c_state);
@@ -595,12 +657,15 @@
 			if (rc)
 				SDE_ERROR("prep fb failed, %d\n", rc);
 		}
-	}
-
-	if (idx == CONNECTOR_PROP_TOPOLOGY_CONTROL) {
-		rc = sde_rm_check_property_topctl(val);
-		if (rc)
-			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
+		break;
+	case CONNECTOR_PROP_LP:
+		mutex_lock(&c_conn->lock);
+		c_conn->lp_mode = val;
+		_sde_connector_update_power_locked(c_conn);
+		mutex_unlock(&c_conn->lock);
+		break;
+	default:
+		break;
 	}
 
 	if (idx == CONNECTOR_PROP_ROI_V1) {
@@ -716,6 +781,59 @@
 	return status;
 }
 
+static int sde_connector_dpms(struct drm_connector *connector,
+				     int mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	/* validate incoming dpms request */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		SDE_DEBUG("conn %d dpms set to %d\n", connector->base.id, mode);
+		break;
+	default:
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	mutex_lock(&c_conn->lock);
+	c_conn->dpms_mode = mode;
+	_sde_connector_update_power_locked(c_conn);
+	mutex_unlock(&c_conn->lock);
+
+	/* use helper for boilerplate handling */
+	return drm_atomic_helper_connector_dpms(connector, mode);
+}
+
+int sde_connector_get_dpms(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int rc;
+
+	if (!connector) {
+		SDE_DEBUG("invalid connector\n");
+		return DRM_MODE_DPMS_OFF;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	mutex_lock(&c_conn->lock);
+	rc = c_conn->dpms_mode;
+	mutex_unlock(&c_conn->lock);
+
+	return rc;
+}
+
 #ifdef CONFIG_DEBUG_FS
 /**
  * sde_connector_init_debugfs - initialize connector debugfs
@@ -758,7 +876,7 @@
 }
 
 static const struct drm_connector_funcs sde_connector_ops = {
-	.dpms =                   drm_atomic_helper_connector_dpms,
+	.dpms =                   sde_connector_dpms,
 	.reset =                  sde_connector_atomic_reset,
 	.detect =                 sde_connector_detect,
 	.destroy =                sde_connector_destroy,
@@ -882,6 +1000,10 @@
 	c_conn->panel = panel;
 	c_conn->display = display;
 
+	c_conn->dpms_mode = DRM_MODE_DPMS_ON;
+	c_conn->lp_mode = 0;
+	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
+
 	/* cache mmu_id's for later */
 	sde_kms = to_sde_kms(priv->kms);
 	if (sde_kms->vbif[VBIF_NRT]) {
@@ -916,6 +1038,8 @@
 		goto error_cleanup_conn;
 	}
 
+	mutex_init(&c_conn->lock);
+
 	rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
 	if (rc) {
 		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
@@ -990,6 +1114,10 @@
 	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
 			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
 
+	msm_property_install_range(&c_conn->property_info, "autorefresh",
+			0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0,
+			CONNECTOR_PROP_AUTOREFRESH);
+
 	/* enum/bitmask properties */
 	msm_property_install_enum(&c_conn->property_info, "topology_name",
 			DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
@@ -999,6 +1127,10 @@
 			0, 1, e_topology_control,
 			ARRAY_SIZE(e_topology_control),
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
+	msm_property_install_enum(&c_conn->property_info, "LP",
+			0, 0, e_power_mode,
+			ARRAY_SIZE(e_power_mode),
+			CONNECTOR_PROP_LP);
 
 	rc = msm_property_install_get_status(&c_conn->property_info);
 	if (rc) {
@@ -1020,6 +1152,7 @@
 		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
+	mutex_destroy(&c_conn->lock);
 	sde_fence_deinit(&c_conn->retire_fence);
 error_cleanup_conn:
 	drm_connector_cleanup(&c_conn->base);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 601299e..71e64e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -121,6 +121,17 @@
 	int (*get_info)(struct msm_display_info *info, void *display);
 
 	/**
+	 * get_topology - retrieve current topology for the mode selected
+	 * @drm_mode: Display mode set for the display
+	 * @topology: Out parameter. Topology for the mode.
+	 * @max_mixer_width: max width supported by HW layer mixer
+	 * Returns: Zero on success
+	 */
+	int (*get_topology)(const struct drm_display_mode *drm_mode,
+			struct msm_display_topology *topology,
+			u32 max_mixer_width);
+
+	/**
 	 * enable_event - notify display of event registration/unregistration
 	 * @connector: Pointer to drm connector structure
 	 * @event_idx: SDE connector event index
@@ -158,6 +169,20 @@
 	 * @enable: State of clks
 	 */
 	int (*clk_ctrl)(void *handle, u32 type, u32 state);
+
+	/**
+	 * set_power - update dpms setting
+	 * @connector: Pointer to drm connector structure
+	 * @power_mode: One of the following,
+	 *              SDE_MODE_DPMS_ON
+	 *              SDE_MODE_DPMS_LP1
+	 *              SDE_MODE_DPMS_LP2
+	 *              SDE_MODE_DPMS_OFF
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_power)(struct drm_connector *connector,
+			int power_mode, void *display);
 };
 
 /**
@@ -192,8 +217,12 @@
  * @mmu_secure: MMU id for secure buffers
  * @mmu_unsecure: MMU id for unsecure buffers
  * @name: ASCII name of connector
+ * @lock: Mutex lock object for this structure
  * @retire_fence: Retire fence context reference
  * @ops: Local callback function pointer table
+ * @dpms_mode: DPMS property setting from user space
+ * @lp_mode: LP property setting from user space
+ * @last_panel_power_mode: Last consolidated dpms/lp mode setting
  * @property_info: Private structure for generic property handling
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
@@ -215,8 +244,12 @@
 
 	char name[SDE_CONNECTOR_NAME_SIZE];
 
+	struct mutex lock;
 	struct sde_fence_context retire_fence;
 	struct sde_connector_ops ops;
+	int dpms_mode;
+	int lp_mode;
+	int last_panel_power_mode;
 
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
@@ -381,6 +414,13 @@
 void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
 
 /**
+ * sde_connector_get_dpms - query dpms setting
+ * @connector: Pointer to drm connector structure
+ * Returns: Current DPMS setting for connector
+ */
+int sde_connector_get_dpms(struct drm_connector *connector);
+
+/**
  * sde_connector_trigger_event - indicate that an event has occurred
  *	Any callbacks that have been registered against this event will
  *	be called from the same thread context.
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 6bae083..1bd7654 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -808,6 +808,44 @@
 	return 0;
 }
 
+static int _sde_crtc_check_autorefresh(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *crtc_state;
+	struct drm_connector *conn;
+	struct drm_connector_state *conn_state;
+	int i;
+
+	if (!crtc || !state)
+		return -EINVAL;
+
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	if (sde_kms_rect_is_null(&crtc_state->crtc_roi))
+		return 0;
+
+	/* partial update active, check if autorefresh is also requested */
+	for_each_connector_in_state(state->state, conn, conn_state, i) {
+		uint64_t autorefresh;
+
+		if (!conn_state || conn_state->crtc != crtc)
+			continue;
+
+		autorefresh = sde_connector_get_property(conn_state,
+				CONNECTOR_PROP_AUTOREFRESH);
+		if (autorefresh) {
+			SDE_ERROR(
+				"%s: autorefresh & partial crtc roi incompatible %llu\n",
+					sde_crtc->name, autorefresh);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int _sde_crtc_set_lm_roi(struct drm_crtc *crtc,
 		struct drm_crtc_state *state, int lm_idx)
 {
@@ -826,55 +864,82 @@
 	lm_bounds = &crtc_state->lm_bounds[lm_idx];
 	lm_roi = &crtc_state->lm_roi[lm_idx];
 
-	if (!sde_kms_rect_is_null(crtc_roi)) {
-		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
-		if (sde_kms_rect_is_null(lm_roi)) {
-			SDE_ERROR("unsupported R/L only partial update\n");
-			return -EINVAL;
-		}
-	} else {
+	if (sde_kms_rect_is_null(crtc_roi))
 		memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
-	}
+	else
+		sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
 
 	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
 			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
 
+	/* if any dimension is zero, clear all dimensions for clarity */
+	if (sde_kms_rect_is_null(lm_roi))
+		memset(lm_roi, 0, sizeof(*lm_roi));
+
 	return 0;
 }
 
+static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *crtc_state;
+	u32 disp_bitmask = 0;
+	int i;
+
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+			disp_bitmask |= BIT(i);
+	}
+
+	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
+
+	return disp_bitmask;
+}
+
 static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *crtc_state;
-	const struct sde_rect *roi_prv, *roi_cur;
-	int lm_idx;
+	const struct sde_rect *roi[CRTC_DUAL_MIXERS];
 
 	if (!crtc || !state)
 		return -EINVAL;
 
+	sde_crtc = to_sde_crtc(crtc);
+	crtc_state = to_sde_crtc_state(state);
+
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("%s: unsupported number of mixers: %d\n",
+				sde_crtc->name, sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+
 	/*
 	 * On certain HW, ROIs must be centered on the split between LMs,
 	 * and be of equal width.
 	 */
+	roi[0] = &crtc_state->lm_roi[0];
+	roi[1] = &crtc_state->lm_roi[1];
 
-	sde_crtc = to_sde_crtc(crtc);
-	crtc_state = to_sde_crtc_state(state);
+	/* if one of the rois is null it's a left/right-only update */
+	if (sde_kms_rect_is_null(roi[0]) || sde_kms_rect_is_null(roi[1]))
+		return 0;
 
-	roi_prv = &crtc_state->lm_roi[0];
-	for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-		roi_cur = &crtc_state->lm_roi[lm_idx];
-
-		/* check lm rois are equal width & first roi ends at 2nd roi */
-		if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
-				(roi_prv->w != roi_cur->w)) {
-			SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
-					sde_crtc->name,
-					lm_idx-1, roi_prv->x, roi_prv->w,
-					lm_idx, roi_cur->x, roi_cur->w);
-			return -EINVAL;
-		}
-		roi_prv = roi_cur;
+	/* check lm rois are equal width & first roi ends at 2nd roi */
+	if (roi[0]->x + roi[0]->w != roi[1]->x || roi[0]->w != roi[1]->w) {
+		SDE_ERROR(
+			"%s: rois not centered and symmetric: roi0 x %d w %d roi1 x %d w %d\n",
+				sde_crtc->name, roi[0]->x, roi[0]->w,
+				roi[1]->x, roi[1]->w);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -953,6 +1018,10 @@
 	if (rc)
 		return rc;
 
+	rc = _sde_crtc_check_autorefresh(crtc, state);
+	if (rc)
+		return rc;
+
 	for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
 		rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
 		if (rc)
@@ -1146,13 +1215,21 @@
  */
 static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_crtc_state;
+	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_mixer *lm;
 
 	int i;
 
+	if (!crtc)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_crtc_state = to_sde_crtc_state(crtc->state);
+	mixer = sde_crtc->mixers;
+
 	SDE_DEBUG("%s\n", sde_crtc->name);
 
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
@@ -1183,9 +1260,19 @@
 	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
+
 		ctl = mixer[i].hw_ctl;
 		lm = mixer[i].hw_lm;
 
+		if (sde_kms_rect_is_null(lm_roi)) {
+			SDE_DEBUG(
+				"%s: lm%d leave ctl%d mask 0 since null roi\n",
+					sde_crtc->name, lm->idx - LM_0,
+					ctl->idx - CTL_0);
+			continue;
+		}
+
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
 
 		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
@@ -1678,9 +1765,9 @@
 		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
 	struct drm_device *dev;
 	unsigned long flags;
-	u32 i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -1711,12 +1798,12 @@
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
-	/* Reset flush mask from previous commit */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
 
-		if (ctl)
-			ctl->ops.clear_pending_flush(ctl);
+		/* encoder will trigger pending mask now */
+		sde_encoder_trigger_kickoff_pending(encoder);
 	}
 
 	/*
@@ -1859,6 +1946,14 @@
 	priv = sde_kms->dev->dev_private;
 	cstate = to_sde_crtc_state(crtc->state);
 
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to start a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct sde_encoder_kickoff_params params = { 0 };
 
@@ -1870,6 +1965,8 @@
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
 		params.inline_rotate_prefill = cstate->sbuf_prefill_line;
+		params.affected_displays = _sde_crtc_get_displays_affected(crtc,
+				crtc->state);
 		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
@@ -2074,21 +2171,89 @@
 	crtc->state = &cstate->base;
 }
 
+static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en)
+{
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	} else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
+		SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, true);
+	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
+		SDE_ERROR("crtc%d invalid vblank disable\n",
+				sde_crtc->base.base.id);
+		return -EINVAL;
+	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
+		SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, false);
+	} else {
+		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
+				sde_crtc->base.base.id,
+				en ? "enable" : "disable",
+				atomic_read(&sde_crtc->vblank_refcount));
+	}
+
+	return 0;
+}
+
+static void sde_crtc_handle_power_event(u32 event_type, void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	SDE_EVT32(DRMID(crtc), event_type);
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
+		/* restore encoder; crtc will be programmed during commit */
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+
+			sde_encoder_virt_restore(encoder);
+		}
+
+	} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
+		struct drm_plane *plane;
+
+		/*
+		 * set revalidate flag in planes, so it will be re-programmed
+		 * in the next frame update
+		 */
+		drm_atomic_crtc_for_each_plane(plane, crtc)
+			sde_plane_set_revalidate(plane, true);
+	}
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc || !crtc->dev || !crtc->state) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
@@ -2103,13 +2268,9 @@
 				crtc->base.id);
 		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
 							SDE_EVTLOG_FUNC_CASE1);
-		drm_for_each_encoder(encoder, crtc->dev) {
-			if (encoder->crtc != crtc)
-				continue;
-			sde_encoder_register_vblank_callback(encoder, NULL,
-						NULL);
-		}
-		atomic_set(&sde_crtc->vblank_refcount, 0);
+		while (atomic_read(&sde_crtc->vblank_refcount))
+			if (_sde_crtc_vblank_no_lock(sde_crtc, false))
+				break;
 	}
 
 	if (atomic_read(&sde_crtc->frame_pending)) {
@@ -2132,6 +2293,10 @@
 		cstate->rsc_update = false;
 	}
 
+	if (sde_crtc->power_event)
+		sde_power_handle_unregister_event(&priv->phandle,
+				sde_crtc->power_event);
+
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 
@@ -2153,14 +2318,16 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret;
 
-	if (!crtc) {
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
+	priv = crtc->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32(DRMID(crtc));
@@ -2183,6 +2350,11 @@
 				sde_crtc->name, node->event);
 	}
 	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	sde_crtc->power_event = sde_power_handle_register_event(
+		&priv->phandle,
+		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+		sde_crtc_handle_power_event, crtc, sde_crtc->name);
 }
 
 struct plane_state {
@@ -2245,6 +2417,10 @@
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
+	/* force a full mode set if active state changed */
+	if (state->active_changed)
+		state->mode_changed = true;
+
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
@@ -2463,7 +2639,7 @@
 int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
 	struct sde_crtc *sde_crtc;
-	int rc = 0;
+	int rc;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -2472,25 +2648,9 @@
 	sde_crtc = to_sde_crtc(crtc);
 
 	mutex_lock(&sde_crtc->crtc_lock);
-	if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
-		SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
-		if (!sde_crtc->suspend)
-			_sde_crtc_vblank_enable_nolock(sde_crtc, true);
-	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
-		SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
-		rc = -EINVAL;
-	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
-		SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
-		if (!sde_crtc->suspend)
-			_sde_crtc_vblank_enable_nolock(sde_crtc, false);
-	} else {
-		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
-				crtc->base.id,
-				en ? "enable" : "disable",
-				atomic_read(&sde_crtc->vblank_refcount));
-	}
-
+	rc = _sde_crtc_vblank_no_lock(sde_crtc, en);
 	mutex_unlock(&sde_crtc->crtc_lock);
+
 	return rc;
 }
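
For context, the refcounting that moves into _sde_crtc_vblank_no_lock() above only touches the hardware on the 0->1 and 1->0 transitions (and only when the crtc is not suspended), so independent users stay balanced. A minimal illustration of the expected call pattern through sde_crtc_vblank(); this is a hypothetical sequence, not code from the patch:

	sde_crtc_vblank(crtc, true);   /* refcount 0->1: HW vblank enabled  */
	sde_crtc_vblank(crtc, true);   /* refcount 1->2: no HW change       */
	sde_crtc_vblank(crtc, false);  /* refcount 2->1: no HW change       */
	sde_crtc_vblank(crtc, false);  /* refcount 1->0: HW vblank disabled */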
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 7ad0955..ec5ec1d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -138,6 +138,7 @@
  * @event_free_list : List of available event structures
  * @event_lock    : Spinlock around event handling code
  * @misr_enable   : boolean entry indicates misr enable/disable status.
+ * @power_event   : registered power event handle
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -187,6 +188,8 @@
 	struct list_head event_free_list;
 	spinlock_t event_lock;
 	bool misr_enable;
+
+	struct sde_power_event *power_event;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -248,10 +251,10 @@
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
- * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
- *                  Origin top left of CRTC.
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
  * @lm_roi        : Current LM ROI, possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @user_roi_list : List of user's requested ROIs as from set property
@@ -274,8 +277,8 @@
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 
-	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect crtc_roi;
+	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
 	struct msm_roi_list user_roi_list;
 
@@ -316,6 +319,21 @@
 }
 
 /**
+ * sde_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int sde_crtc_frame_pending(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc)
+		return -EINVAL;
+
+	sde_crtc = to_sde_crtc(crtc);
+	return atomic_read(&sde_crtc->frame_pending);
+}
+
+/**
  * sde_crtc_vblank - enable or disable vblanks for this crtc
  * @crtc: Pointer to drm crtc object
  * @en: true to enable vblanks, false to disable
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 742ea20..39127e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -34,6 +34,7 @@
 #include "sde_encoder_phys.h"
 #include "sde_power_handle.h"
 #include "sde_hw_dsc.h"
+#include "sde_crtc.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -58,6 +59,56 @@
 
 #define MISR_BUFF_SIZE			256
 
+#define IDLE_TIMEOUT	64
+
+/**
+ * enum sde_enc_rc_events - events for resource control state machine
+ * @SDE_ENC_RC_EVENT_KICKOFF:
+ *	This event happens at NORMAL priority.
+ *	Event that signals the start of the transfer. When this event is
+ *	received, enable MDP/DSI core clocks and request RSC with CMD state.
+ *	Regardless of the previous state, the resource should be in ON state
+ *	at the end of this event.
+ * @SDE_ENC_RC_EVENT_FRAME_DONE:
+ *	This event happens at INTERRUPT level.
+ *	Event signals the end of the data transfer after the PP FRAME_DONE
+ *	event. At the end of this event, a delayed work is scheduled to go to
+ *	IDLE_PC state after IDLE_TIMEOUT time.
+ * @SDE_ENC_RC_EVENT_STOP:
+ *	This event happens at NORMAL priority.
+ *	When this event is received, disable all the MDP/DSI core clocks
+ *	and request RSC with IDLE state. Resource state should be in OFF
+ *	at the end of the event.
+ * @SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
+ *	This event happens at NORMAL priority from a work item.
+ *	Event signals that there will be a frame update soon and the driver
+ *	should wake up early to update the frame with minimum latency.
+ * @SDE_ENC_RC_EVENT_ENTER_IDLE:
+ *	This event happens at NORMAL priority from a work item.
+ *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ *	This would disable MDP/DSI core clocks and request RSC with IDLE state
+ *	and change the resource state to IDLE.
+ */
+enum sde_enc_rc_events {
+	SDE_ENC_RC_EVENT_KICKOFF = 1,
+	SDE_ENC_RC_EVENT_FRAME_DONE,
+	SDE_ENC_RC_EVENT_STOP,
+	SDE_ENC_RC_EVENT_EARLY_WAKE_UP,
+	SDE_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/**
+ * enum sde_enc_rc_states - states that the resource control maintains
+ * @SDE_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @SDE_ENC_RC_STATE_ON: Resource is in ON state
+ * @SDE_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum sde_enc_rc_states {
+	SDE_ENC_RC_STATE_OFF,
+	SDE_ENC_RC_STATE_ON,
+	SDE_ENC_RC_STATE_IDLE
+};
+
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
  *	encoders. Virtual encoder manages one "logical" display. Physical
@@ -91,7 +142,18 @@
  * @crtc_frame_event:		callback event
  * @frame_done_timeout:		frame done timeout in Hz
  * @frame_done_timer:		watchdog timer for frame done event
+ * @rsc_client:			rsc client pointer
+ * @rsc_state_init:		boolean to indicate rsc config init
+ * @disp_info:			local copy of msm_display_info struct
  * @misr_enable:		misr enable/disable status
+ * @idle_pc_supported:		indicate if idle power collapse is supported
+ * @rc_lock:			resource control mutex lock to protect
+ *				virt encoder over various state changes
+ * @rc_state:			resource controller state
+ * @delayed_off_work:		delayed worker to schedule disabling of
+ *				clks and resources after IDLE_TIMEOUT time.
+ * @topology:                   topology of the display
+ * @mode_set_complete:          flag to indicate modeset completion
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -120,9 +182,16 @@
 	struct timer_list frame_done_timer;
 
 	struct sde_rsc_client *rsc_client;
+	bool rsc_state_init;
 	struct msm_display_info disp_info;
-	bool rsc_state_update;
 	bool misr_enable;
+
+	bool idle_pc_supported;
+	struct mutex rc_lock;
+	enum sde_enc_rc_states rc_state;
+	struct delayed_work delayed_off_work;
+	struct msm_display_topology topology;
+	bool mode_set_complete;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -184,15 +253,14 @@
 	memset(hw_res, 0, sizeof(*hw_res));
 	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
 
-	if (_sde_is_dsc_enabled(sde_enc))
-		hw_res->needs_dsc = true;
-
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys && phys->ops.get_hw_resources)
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
+
+	hw_res->topology = sde_enc->topology;
 }
 
 void sde_encoder_destroy(struct drm_encoder *drm_enc)
@@ -358,9 +426,18 @@
 	}
 
 	/* Reserve dynamic resources now. Indicating AtomicTest phase */
-	if (!ret)
-		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+	if (!ret) {
+		/*
+		 * Avoid reserving resources when mode set is pending. Topology
+		 * info may not be available to complete reservation.
+		 */
+		if (drm_atomic_crtc_needs_modeset(crtc_state)
+				&& sde_enc->mode_set_complete) {
+			ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
 				conn_state, true);
+			sde_enc->mode_set_complete = false;
+		}
+	}
 
 	if (!ret)
 		drm_mode_set_crtcinfo(adj_mode, 0);
@@ -655,7 +732,7 @@
 	int ret = 0;
 
 	topology = sde_connector_get_topology_name(drm_conn);
-	if (topology == SDE_RM_TOPOLOGY_UNKNOWN) {
+	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
 		return -EINVAL;
 	}
@@ -664,16 +741,15 @@
 	SDE_EVT32(DRMID(&sde_enc->base));
 
 	switch (topology) {
-	case SDE_RM_TOPOLOGY_SINGLEPIPE:
+	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPEMERGE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
 		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
 		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_PPSPLIT:
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
 				topology);
@@ -700,6 +776,11 @@
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	disp_info = &sde_enc->disp_info;
 
+	if (!sde_enc->rsc_client) {
+		SDE_DEBUG("rsc client not created\n");
+		return 0;
+	}
+
 	/**
 	 * only primary command mode panel can request CMD state.
 	 * all other panels/displays can request for VID state including
@@ -710,14 +791,14 @@
 		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
 		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
 
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
 					&& disp_info->is_primary) {
 		rsc_config.fps = disp_info->frame_rate;
 		rsc_config.vtotal = disp_info->vtotal;
 		rsc_config.prefill_lines = disp_info->prefill_lines;
 		rsc_config.jitter = disp_info->jitter;
 		/* update it only once */
-		sde_enc->rsc_state_update = true;
+		sde_enc->rsc_state_init = true;
 
 		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
 			rsc_state, &rsc_config,
@@ -748,6 +829,277 @@
 	return disp_info->is_primary ? sde_enc->rsc_client : NULL;
 }
 
+static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+		bool enable)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_encoder_virt *sde_enc;
+	int i;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
+	SDE_EVT32(DRMID(drm_enc), enable);
+
+	if (!sde_enc->cur_master) {
+		SDE_ERROR("encoder master not set\n");
+		return;
+	}
+
+	if (enable) {
+		/* enable SDE core clks */
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true);
+
+		/* enable DSI clks */
+		sde_connector_clk_ctrl(sde_enc->cur_master->connector, true);
+
+		/* enable all the irq */
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+			if (phys && phys->ops.irq_control)
+				phys->ops.irq_control(phys, true);
+		}
+
+		/* enable RSC */
+		sde_encoder_update_rsc_client(drm_enc, true);
+
+	} else {
+
+		/* disable RSC */
+		sde_encoder_update_rsc_client(drm_enc, false);
+
+		/* disable all the irq */
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys =
+						sde_enc->phys_encs[i];
+
+			if (phys && phys->ops.irq_control)
+				phys->ops.irq_control(phys, false);
+		}
+
+		/* disable DSI clks */
+		sde_connector_clk_ctrl(sde_enc->cur_master->connector, false);
+
+		/* disable SDE core clks */
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+	}
+
+}
+
+static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
+		u32 sw_event)
+{
+	bool schedule_off = false;
+	struct sde_encoder_virt *sde_enc;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	/*
+	 * when idle_pc is not supported, process only KICKOFF and STOP
+	 * events and return early for other events (i.e. video mode).
+	 */
+	if (!sde_enc->idle_pc_supported &&
+			(sw_event != SDE_ENC_RC_EVENT_KICKOFF &&
+				sw_event != SDE_ENC_RC_EVENT_STOP))
+		return 0;
+
+	SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc_supported:%d\n", sw_event,
+			sde_enc->idle_pc_supported);
+	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+			sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
+
+	switch (sw_event) {
+	case SDE_ENC_RC_EVENT_KICKOFF:
+		/* cancel delayed off work, if any */
+		if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+					sw_event);
+
+		mutex_lock(&sde_enc->rc_lock);
+
+		/* return if the resource control is already in ON state */
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in ON state\n",
+					sw_event);
+			mutex_unlock(&sde_enc->rc_lock);
+			return 0;
+		}
+
+		/* enable all the clks and resources */
+		_sde_encoder_resource_control_helper(drm_enc, true);
+
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE1);
+		sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
+
+	case SDE_ENC_RC_EVENT_FRAME_DONE:
+		/*
+		 * mutex lock is not used as this event happens in interrupt
+		 * context. Locking is not required because the other events,
+		 * like KICKOFF and STOP, do a wait-for-idle before executing
+		 * the resource_control
+		 */
+		if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
+			SDE_ERROR_ENC(sde_enc, "sw_event:%d,rc:%d-unexpected\n",
+					sw_event, sde_enc->rc_state);
+			return -EINVAL;
+		}
+
+		/*
+		 * schedule off work item only when there are no
+		 * frames pending
+		 */
+		if (sde_crtc_frame_pending(drm_enc->crtc) > 1) {
+			SDE_DEBUG_ENC(sde_enc, "skip schedule work");
+			return 0;
+		}
+
+		/* schedule delayed off work */
+		schedule_delayed_work(&sde_enc->delayed_off_work,
+					msecs_to_jiffies(IDLE_TIMEOUT));
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_EVTLOG_FUNC_CASE2);
+		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
+				sw_event);
+		break;
+
+	case SDE_ENC_RC_EVENT_STOP:
+		/* cancel delayed off work, if any */
+		if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+					sw_event);
+
+		mutex_lock(&sde_enc->rc_lock);
+
+		/* return if the resource control is already in OFF state */
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
+					sw_event);
+			mutex_unlock(&sde_enc->rc_lock);
+			return 0;
+		}
+
+		/*
+		 * disable the clks and resources only if the resource control
+		 * is in ON state, otherwise the clks and resources would have
+		 * been disabled while going into IDLE state
+		 */
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON)
+			_sde_encoder_resource_control_helper(drm_enc, false);
+
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE3);
+		sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
+
+	case SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
+		/* cancel delayed off work, if any */
+		if (cancel_delayed_work_sync(&sde_enc->delayed_off_work)) {
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+					sw_event);
+			schedule_off = true;
+		}
+
+		mutex_lock(&sde_enc->rc_lock);
+
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				schedule_off, SDE_EVTLOG_FUNC_CASE4);
+
+		/* return if the resource control is in OFF state */
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
+					sw_event);
+			mutex_unlock(&sde_enc->rc_lock);
+			return 0;
+		}
+
+		/*
+		 * enable all the clks and resources if resource control is
+		 * coming out of IDLE state
+		 */
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
+			_sde_encoder_resource_control_helper(drm_enc, true);
+			sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+			schedule_off = true;
+		}
+
+		/*
+		 * schedule off work when there are no frames pending and
+		 * 1. early wakeup cancelled off work
+		 * 2. early wakeup changed the rc_state to ON - this is to
+		 *	handle cases where early wakeup is called but no
+		 *	frame update follows
+		 */
+		if (schedule_off && !sde_crtc_frame_pending(drm_enc->crtc)) {
+			/* schedule delayed off work */
+			schedule_delayed_work(&sde_enc->delayed_off_work,
+					msecs_to_jiffies(IDLE_TIMEOUT));
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
+					sw_event);
+		}
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
+
+	case SDE_ENC_RC_EVENT_ENTER_IDLE:
+		mutex_lock(&sde_enc->rc_lock);
+
+		if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
+			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
+					sw_event, sde_enc->rc_state);
+			mutex_unlock(&sde_enc->rc_lock);
+			return 0;
+		}
+
+		/* disable all the clks and resources */
+		_sde_encoder_resource_control_helper(drm_enc, false);
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_ENC_RC_STATE_IDLE, SDE_EVTLOG_FUNC_CASE5);
+		sde_enc->rc_state = SDE_ENC_RC_STATE_IDLE;
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
+
+	default:
+		SDE_ERROR("unexpected sw_event: %d\n", sw_event);
+		break;
+	}
+
+	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+			sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
+	return 0;
+}
+
+static void sde_encoder_off_work(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct sde_encoder_virt *sde_enc = container_of(dw,
+			struct sde_encoder_virt, delayed_off_work);
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	sde_encoder_resource_control(&sde_enc->base,
+			SDE_ENC_RC_EVENT_ENTER_IDLE);
+}
+
 static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 				      struct drm_display_mode *mode,
 				      struct drm_display_mode *adj_mode)
@@ -757,6 +1109,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_connector *sde_conn = NULL;
 	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	int i = 0, ret;
 
@@ -786,6 +1139,17 @@
 		return;
 	}
 
+	sde_conn = to_sde_connector(conn);
+	if (sde_conn) {
+		ret = sde_conn->ops.get_topology(adj_mode, &sde_enc->topology,
+				sde_kms->catalog->max_mixer_width);
+		if (ret) {
+			SDE_ERROR_ENC(sde_enc,
+				"invalid topology for the mode\n");
+			return;
+		}
+	}
+
 	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
 	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
 			conn->state, false);
@@ -826,63 +1190,141 @@
 				phys->ops.mode_set(phys, mode, adj_mode);
 		}
 	}
+
+	sde_enc->mode_set_complete = true;
+}
+
+static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_hw_mdp *hw_mdptop;
+	int i = 0;
+	int ret = 0;
+	struct sde_watchdog_te_status te_cfg = { 0 };
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	priv = drm_enc->dev->dev_private;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc || !sde_enc->cur_master) {
+		SDE_ERROR("invalid sde encoder/master\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	hw_mdptop = sde_kms->hw_mdp;
+
+	if (!hw_mdptop) {
+		SDE_ERROR("invalid mdptop\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	}
+
+	if (sde_enc->cur_master->hw_mdptop &&
+			sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+		sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+				sde_enc->cur_master->hw_mdptop,
+				sde_kms->catalog);
+
+	if (_sde_is_dsc_enabled(sde_enc)) {
+		ret = _sde_encoder_dsc_setup(sde_enc);
+		if (ret)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
+	}
+
+	if (hw_mdptop->ops.setup_vsync_sel) {
+		for (i = 0; i < sde_enc->num_phys_encs; i++)
+			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+		te_cfg.pp_count = sde_enc->num_phys_encs;
+		te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
+		hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
+				sde_enc->disp_info.is_te_using_watchdog_timer);
+	}
+}
+
+void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && (phys != sde_enc->cur_master) && phys->ops.restore)
+			phys->ops.restore(phys);
+	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->ops.restore)
+		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
+
+	_sde_encoder_virt_enable_helper(drm_enc);
 }
 
 static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	int i = 0;
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
-	} else if (!drm_enc->dev) {
-		SDE_ERROR("invalid dev\n");
-		return;
-	} else if (!drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid dev_private\n");
-		return;
 	}
-
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
 
-	ret = _sde_encoder_power_enable(sde_enc, true);
-	if (ret)
-		return;
-
 	sde_enc->cur_master = NULL;
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+			SDE_DEBUG_ENC(sde_enc, "master is now idx %d\n", i);
+			sde_enc->cur_master = phys;
+			break;
+		}
+	}
+
+	if (!sde_enc->cur_master) {
+		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+		return;
+	}
+
+	ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
+				ret);
+		return;
+	}
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys) {
-			if (phys->ops.is_master && phys->ops.is_master(phys)) {
-				SDE_DEBUG_ENC(sde_enc,
-						"master is now idx %d\n", i);
-				sde_enc->cur_master = phys;
-			} else if (phys->ops.enable) {
-				phys->ops.enable(phys);
-			}
-		}
+		if (phys && (phys != sde_enc->cur_master) && phys->ops.enable)
+			phys->ops.enable(phys);
 	}
 
-	sde_encoder_update_rsc_client(drm_enc, true);
-
-	if (!sde_enc->cur_master)
-		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
-	else if (sde_enc->cur_master->ops.enable)
+	if (sde_enc->cur_master->ops.enable)
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n",
-					ret);
-	}
+	_sde_encoder_virt_enable_helper(drm_enc);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -914,9 +1356,8 @@
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys) {
-			if (phys->ops.disable && !phys->ops.is_master(phys))
-				phys->ops.disable(phys);
+		if (phys && phys->ops.disable && !phys->ops.is_master(phys)) {
+			phys->ops.disable(phys);
 			phys->connector = NULL;
 		}
 	}
@@ -927,17 +1368,19 @@
 		del_timer_sync(&sde_enc->frame_done_timer);
 	}
 
-	sde_encoder_update_rsc_client(drm_enc, false);
-
 	if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
 		sde_enc->cur_master->ops.disable(sde_enc->cur_master);
 
-	sde_enc->cur_master = NULL;
-	SDE_DEBUG_ENC(sde_enc, "cleared master\n");
+	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
+
+	if (sde_enc->cur_master) {
+		sde_enc->cur_master->connector = NULL;
+		sde_enc->cur_master = NULL;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
 
 	sde_rm_release(&sde_kms->rm, drm_enc);
-
-	_sde_encoder_power_enable(sde_enc, false);
 }
 
 static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
@@ -1066,6 +1509,9 @@
 		atomic_set(&sde_enc->frame_done_timeout, 0);
 		del_timer(&sde_enc->frame_done_timer);
 
+		sde_encoder_resource_control(drm_enc,
+				SDE_ENC_RC_EVENT_FRAME_DONE);
+
 		if (sde_enc->crtc_frame_event_cb)
 			sde_enc->crtc_frame_event_cb(
 					sde_enc->crtc_frame_event_cb_data,
@@ -1097,6 +1543,14 @@
 		return;
 	}
 
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip flush pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
+
 	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
@@ -1118,11 +1572,21 @@
  */
 static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
 {
+	struct sde_hw_ctl *ctl;
+
 	if (!phys) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
+	ctl = phys->hw_ctl;
+	if (phys->split_role == ENC_ROLE_SKIP) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
+				"skip start pp%d ctl%d\n",
+				phys->hw_pp->idx - PINGPONG_0,
+				ctl->idx - CTL_0);
+		return;
+	}
 	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
 		phys->ops.trigger_start(phys);
 }
@@ -1241,7 +1705,7 @@
 	/* don't perform flush/start operations for slave encoders */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_UNKNOWN;
+		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
 
 		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
 			continue;
@@ -1254,9 +1718,13 @@
 			topology = sde_connector_get_topology_name(
 					phys->connector);
 
-		/* don't wait on ppsplit slaves, they dont register irqs */
+		/*
+		 * don't wait on ppsplit slaves or skipped encoders because
+		 * they don't receive irqs
+		 */
 		if (!(topology == SDE_RM_TOPOLOGY_PPSPLIT &&
-				phys->split_role == ENC_ROLE_SLAVE))
+				phys->split_role == ENC_ROLE_SLAVE) &&
+				phys->split_role != ENC_ROLE_SKIP)
 			set_bit(i, sde_enc->frame_busy_mask);
 
 		if (!phys->ops.needs_single_flush ||
@@ -1279,6 +1747,92 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	int i, num_active_phys;
+	bool master_assigned = false;
+
+	if (!drm_enc || !params)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (sde_enc->num_phys_encs <= 1)
+		return;
+
+	/* count bits set */
+	num_active_phys = hweight_long(params->affected_displays);
+
+	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
+			params->affected_displays, num_active_phys);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		enum sde_enc_split_role prv_role, new_role;
+		bool active;
+
+		phys = sde_enc->phys_encs[i];
+		if (!phys || !phys->ops.update_split_role)
+			continue;
+
+		active = test_bit(i, &params->affected_displays);
+		prv_role = phys->split_role;
+
+		if (active && num_active_phys == 1)
+			new_role = ENC_ROLE_SOLO;
+		else if (active && !master_assigned)
+			new_role = ENC_ROLE_MASTER;
+		else if (active)
+			new_role = ENC_ROLE_SLAVE;
+		else
+			new_role = ENC_ROLE_SKIP;
+
+		phys->ops.update_split_role(phys, new_role);
+		if (new_role == ENC_ROLE_SOLO || new_role == ENC_ROLE_MASTER) {
+			sde_enc->cur_master = phys;
+			master_assigned = true;
+		}
+
+		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active);
+	}
+}
+
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+	struct sde_hw_ctl *ctl;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->hw_ctl) {
+			ctl = phys->hw_ctl;
+			if (ctl->ops.clear_pending_flush)
+				ctl->ops.clear_pending_flush(ctl);
+
+			/* update only for command mode primary ctl */
+			if ((phys == sde_enc->cur_master) &&
+			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			    && ctl->ops.trigger_pending)
+				ctl->ops.trigger_pending(ctl);
+		}
+	}
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1288,8 +1842,8 @@
 	unsigned int i;
 	int rc;
 
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!drm_enc || !params) {
+		SDE_ERROR("invalid args\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
@@ -1308,8 +1862,11 @@
 		}
 	}
 
+	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+
 	/* if any phys needs reset, reset all phys, in-order */
 	if (needs_hw_reset) {
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			phys = sde_enc->phys_encs[i];
 			if (phys && phys->ops.hw_reset)
@@ -1317,6 +1874,8 @@
 		}
 	}
 
+	_sde_encoder_update_master(drm_enc, params);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
@@ -1798,6 +2357,9 @@
 
 	phys_params.comp_type = disp_info->comp_info.comp_type;
 
+	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+		sde_enc->idle_pc_supported = sde_kms->catalog->has_idle_pc;
+
 	mutex_lock(&sde_enc->enc_lock);
 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
 		/*
@@ -1958,6 +2520,9 @@
 		sde_enc->rsc_client = NULL;
 	}
 
+	mutex_init(&sde_enc->rc_lock);
+	INIT_DELAYED_WORK(&sde_enc->delayed_off_work, sde_encoder_off_work);
+
 	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
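
Taken together, the new resource-control code in sde_encoder.c implements a small state machine: KICKOFF brings the encoder to ON (enabling core clocks, DSI clocks, IRQs and RSC via _sde_encoder_resource_control_helper()), FRAME_DONE arms the delayed off work when no more frames are pending, ENTER_IDLE drops to IDLE after IDLE_TIMEOUT, EARLY_WAKE_UP re-enables resources out of IDLE, and STOP forces OFF. A compact sketch of the intended transitions, for illustration only; the authoritative handling is the switch in sde_encoder_resource_control() above:

	/*
	 * OFF/IDLE --KICKOFF-------------------> ON
	 * ON       --FRAME_DONE----------------> ON  (delayed off work armed)
	 * ON       --ENTER_IDLE (IDLE_TIMEOUT)-> IDLE
	 * IDLE     --EARLY_WAKE_UP-------------> ON  (off work re-armed)
	 * ON/IDLE  --STOP----------------------> OFF
	 */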
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 5795e04..b756313 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -33,23 +33,27 @@
  * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
  * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
  * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
- * @needs_dsc:	Request to allocate DSC block
- * @display_num_of_h_tiles:
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ * @topology:   Topology of the display
  */
 struct sde_encoder_hw_resources {
 	enum sde_intf_mode intfs[INTF_MAX];
 	enum sde_intf_mode wbs[WB_MAX];
 	bool needs_cdm;
-	bool needs_dsc;
 	u32 display_num_of_h_tiles;
+	struct msm_display_topology topology;
 };
 
 /**
  * sde_encoder_kickoff_params - info encoder requires at kickoff
  * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
  */
 struct sde_encoder_kickoff_params {
 	u32 inline_rotate_prefill;
+	unsigned long affected_displays;
 };
 
 /**
@@ -101,6 +105,13 @@
 		struct sde_encoder_kickoff_params *params);
 
 /**
+ * sde_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
@@ -124,6 +135,12 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
  * enum sde_encoder_property - property tags for sde enoder
  * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_REFILL: # of prefill line, 0 to disable
  */
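
The new affected_displays field is how the CRTC ROI logic reaches the encoder: _sde_crtc_get_displays_affected() sets bit i when lm_roi[i] is non-null, and _sde_encoder_update_master() then derives SOLO/MASTER/SLAVE/SKIP roles from that mask. A minimal sketch of the producer side, mirroring the CRTC code added in this patch (illustration only; local variable names assumed from the surrounding code):

	struct sde_encoder_kickoff_params params = { 0 };
	int i;

	/* one bit per layer mixer / physical display with a non-null ROI */
	for (i = 0; i < sde_crtc->num_mixers; i++)
		if (!sde_kms_rect_is_null(&cstate->lm_roi[i]))
			params.affected_displays |= BIT(i);

	sde_encoder_prepare_for_kickoff(encoder, &params);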
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index da155b0..3d6dc32 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -41,11 +41,13 @@
  * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
  * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
  * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ * @ENC_ROLE_SKIP:	This encoder is not participating in kickoffs
  */
 enum sde_enc_split_role {
 	ENC_ROLE_SOLO,
 	ENC_ROLE_MASTER,
-	ENC_ROLE_SLAVE
+	ENC_ROLE_SLAVE,
+	ENC_ROLE_SKIP
 };
 
 /**
@@ -117,6 +119,9 @@
  * @collect_misr:		Collects MISR data on frame update
  * @hw_reset:			Issue HW recovery such as CTL reset and clear
  *				SDE_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @update_split_role:		Update the split role of the phys enc
+ * @restore:			Restore all the encoder configs.
  */
 
 struct sde_encoder_phys_ops {
@@ -150,6 +155,10 @@
 				bool enable, u32 frame_count);
 	u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
 	void (*hw_reset)(struct sde_encoder_phys *phys_enc);
+	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
+	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
+			enum sde_enc_split_role role);
+	void (*restore)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -163,6 +172,7 @@
 	INTR_IDX_VSYNC,
 	INTR_IDX_PINGPONG,
 	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
 	INTR_IDX_RDPTR,
 	INTR_IDX_MAX,
 };
@@ -196,6 +206,8 @@
  *				vs. the number of done/vblank irqs. Should hover
  *				between 0-2 Incremented when a new kickoff is
  *				scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl starts
+ *                              pending.
  * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
  */
 struct sde_encoder_phys {
@@ -219,12 +231,14 @@
 	atomic_t vblank_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	wait_queue_head_t pending_kickoff_wq;
 };
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 {
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
 	return atomic_inc_return(&phys->pending_kickoff_cnt);
 }
 
@@ -263,7 +277,6 @@
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
-	int intf_idx;
 	int stream_sel;
 	int irq_idx[INTR_IDX_MAX];
 	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
@@ -419,8 +432,7 @@
 
 	topology = sde_connector_get_topology_name(phys_enc->connector);
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE &&
-			phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_NONE)
+			topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
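
The three new phys-encoder ops declared above are the hooks the virtual encoder relies on: irq_control() is toggled from _sde_encoder_resource_control_helper(), update_split_role() from _sde_encoder_update_master(), and restore() from sde_encoder_virt_restore() after a power-collapse exit. Callers probe each op before use, in the usual pattern (sketch only, matching the loops in sde_encoder.c):

	for (i = 0; i < sde_enc->num_phys_encs; i++) {
		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];

		/* ops are optional; only call them when implemented */
		if (phys && phys->ops.irq_control)
			phys->ops.irq_control(phys, enable);
	}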
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 5b59828..572bd9e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -21,18 +21,21 @@
 #define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
-		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
 
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
 #define PP_TIMEOUT_MAX_TRIALS	10
 
+/* wait for 2 vsyncs only */
+#define CTL_START_TIMEOUT_MS	32
+
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels In the future, may want to allow panels to override
@@ -57,6 +60,46 @@
 	return true;
 }
 
+static void _sde_encoder_phys_cmd_update_flush_mask(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.get_bitmask_intf ||
+			!ctl->ops.update_pending_flush)
+		return;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
+}
+
+static void _sde_encoder_phys_cmd_update_intf_cfg(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -130,11 +173,35 @@
 	if (!cmd_enc)
 		return;
 
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 }
 
+static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *ctl;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (!phys_enc->hw_ctl)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
 static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
 {
 	enum sde_rm_topology_name topology;
@@ -240,7 +307,7 @@
 	if (ret <= 0) {
 		/* read and clear interrupt */
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				INTR_IDX_PINGPONG, true);
+				cmd_enc->irq_idx[INTR_IDX_PINGPONG], true);
 		if (irq_status) {
 			unsigned long flags;
 			SDE_EVT32(DRMID(phys_enc->parent),
@@ -295,8 +362,13 @@
 		return -EINVAL;
 	}
 
-	idx_lookup = (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN) ?
-			cmd_enc->intf_idx : phys_enc->hw_pp->idx;
+	if (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN)
+		idx_lookup = phys_enc->intf_idx;
+	else if (intr_type == SDE_IRQ_TYPE_CTL_START)
+		idx_lookup = phys_enc->hw_ctl ? phys_enc->hw_ctl->idx : -1;
+	else
+		idx_lookup = phys_enc->hw_pp->idx;
+
 	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
 			intr_type, idx_lookup);
 	if (cmd_enc->irq_idx[idx] < 0) {
@@ -363,6 +435,92 @@
 	return 0;
 }
 
+static int sde_encoder_phys_cmd_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+				INTR_IDX_RDPTR,
+				sde_encoder_phys_cmd_pp_rd_ptr_irq,
+				"pp_rd_ptr");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret)
+		SDE_ERROR_CMDENC(cmd_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	if (enable) {
+		sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_PING_PONG_COMP,
+				INTR_IDX_PINGPONG,
+				sde_encoder_phys_cmd_pp_tx_done_irq,
+				"pp_tx_done");
+
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+		sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_INTF_UNDER_RUN,
+				INTR_IDX_UNDERRUN,
+				sde_encoder_phys_cmd_underrun_irq,
+				"underrun");
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_CTL_START,
+				INTR_IDX_CTL_START,
+				sde_encoder_phys_cmd_ctl_start_irq,
+				"ctl_start");
+	} else {
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_CTL_START);
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_UNDERRUN);
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_PINGPONG);
+	}
+}
+
 static void sde_encoder_phys_cmd_tearcheck_config(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -441,12 +599,11 @@
 	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
 }
 
-static void sde_encoder_phys_cmd_pingpong_config(
+static void _sde_encoder_phys_cmd_pingpong_config(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
 	if (!phys_enc || !phys_enc->hw_ctl ||
 			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
@@ -458,13 +615,7 @@
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	intf_cfg.intf = cmd_enc->intf_idx;
-	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
-	intf_cfg.stream_sel = cmd_enc->stream_sel;
-	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
-
-	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
-
+	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -477,59 +628,33 @@
 	return _sde_encoder_phys_is_ppsplit(phys_enc);
 }
 
-static int sde_encoder_phys_cmd_control_vblank_irq(
-		struct sde_encoder_phys *phys_enc,
-		bool enable)
+static void sde_encoder_phys_cmd_enable_helper(
+		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	int ret = 0;
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask = 0;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
 	}
 
-	/* Slave encoders don't report vblank */
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		goto end;
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
 
-	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
-			__builtin_return_address(0),
-			enable, atomic_read(&phys_enc->vblank_refcount));
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
 
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
-			enable, atomic_read(&phys_enc->vblank_refcount));
-
-	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
-		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
-				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-				INTR_IDX_RDPTR,
-				sde_encoder_phys_cmd_pp_rd_ptr_irq,
-				"pp_rd_ptr");
-	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
-		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				INTR_IDX_RDPTR);
-
-end:
-	if (ret)
-		SDE_ERROR_CMDENC(cmd_enc,
-				"control vblank irq error %d, enable %d\n",
-				ret, enable);
-
-	return ret;
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
 }
 
 static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_ctl *ctl;
-	u32 flush_mask = 0;
-	int ret;
 
-	if (!phys_enc || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+	if (!phys_enc) {
+		SDE_ERROR("invalid phys encoder\n");
 		return;
 	}
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
@@ -539,49 +664,8 @@
 		return;
 	}
 
-	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
-
-	sde_encoder_phys_cmd_pingpong_config(phys_enc);
-
-	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
-		goto update_flush;
-
-	/* Both master and slave need to register for pp_tx_done */
-	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
-			SDE_IRQ_TYPE_PING_PONG_COMP,
-			INTR_IDX_PINGPONG,
-			sde_encoder_phys_cmd_pp_tx_done_irq,
-			"pp_tx_done");
-	if (ret)
-		return;
-
-	ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
-	if (ret) {
-		sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				INTR_IDX_PINGPONG);
-		return;
-	}
-
-	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
-			SDE_IRQ_TYPE_INTF_UNDER_RUN,
-			INTR_IDX_UNDERRUN,
-			sde_encoder_phys_cmd_underrun_irq,
-			"underrun");
-	if (ret) {
-		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
-		sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				INTR_IDX_PINGPONG);
-		return;
-	}
-
-update_flush:
-	ctl = phys_enc->hw_ctl;
-	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
-	ctl->ops.update_pending_flush(ctl, flush_mask);
+	sde_encoder_phys_cmd_enable_helper(phys_enc);
 	phys_enc->enable_state = SDE_ENC_ENABLED;
-
-	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
-			ctl->idx - CTL_0, flush_mask);
 }
 
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
@@ -613,21 +697,9 @@
 			SDE_EVT32(DRMID(phys_enc->parent),
 					phys_enc->hw_pp->idx - PINGPONG_0, ret);
 		}
-
-		sde_encoder_phys_cmd_unregister_irq(
-				phys_enc, INTR_IDX_UNDERRUN);
-		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
-		sde_encoder_phys_cmd_unregister_irq(
-				phys_enc, INTR_IDX_PINGPONG);
 	}
 
 	phys_enc->enable_state = SDE_ENC_DISABLED;
-
-	if (atomic_read(&phys_enc->vblank_refcount))
-		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
-				phys_enc->parent->base.id,
-				phys_enc->split_role,
-				atomic_read(&phys_enc->vblank_refcount));
 }
 
 static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
@@ -655,7 +727,7 @@
 		return;
 	}
 	SDE_DEBUG_CMDENC(cmd_enc, "\n");
-	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
@@ -687,24 +759,93 @@
 	}
 }
 
+static int _sde_encoder_phys_cmd_wait_for_ctl_start(
+		struct sde_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct sde_hw_ctl *ctl;
+	u32 irq_status;
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc->hw_ctl) {
+		SDE_ERROR("invalid ctl\n");
+		return -EINVAL;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rc = sde_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
+			ctl->idx - CTL_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_ctlstart_cnt,
+			CTL_START_TIMEOUT_MS);
+	if (rc <= 0) {
+		/* read and clear interrupt */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				cmd_enc->irq_idx[INTR_IDX_CTL_START], true);
+		if (irq_status) {
+			unsigned long flags;
+
+			SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"ctl:%d start done but irq not triggered\n",
+					ctl->idx - CTL_0);
+			local_irq_save(flags);
+			sde_encoder_phys_cmd_ctl_start_irq(cmd_enc,
+					INTR_IDX_CTL_START);
+			local_irq_restore(flags);
+			rc = 0;
+		} else {
+			SDE_ERROR("ctl start interrupt wait failed\n");
+			rc = -EINVAL;
+		}
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
 static int sde_encoder_phys_cmd_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
+	int rc = 0;
+	struct sde_encoder_phys_cmd *cmd_enc;
 
-	if (cmd_enc->serialize_wait4pp)
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (sde_encoder_phys_cmd_is_master(phys_enc))
+		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
 		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
 
-	/*
-	 * following statement is true serialize_wait4pp is false.
-	 *
-	 * Since ctl_start "commits" the transaction to hardware, and the
-	 * tearcheck block takes it from there, there is no need to have a
-	 * separate wait for committed, a la wait-for-vsync in video mode
-	 */
+	return rc;
+}
 
-	return 0;
+static void sde_encoder_phys_cmd_update_split_role(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_enc_split_role role)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	enum sde_enc_split_role old_role = phys_enc->split_role;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
+			old_role, role);
+
+	phys_enc->split_role = role;
+	if (role == ENC_ROLE_SKIP || role == old_role)
+		return;
+
+	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
+	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
 }
 
 static void sde_encoder_phys_cmd_init_ops(
@@ -723,6 +864,9 @@
 	ops->trigger_start = sde_encoder_helper_trigger_start;
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
+	ops->irq_control = sde_encoder_phys_cmd_irq_control;
+	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
+	ops->restore = sde_encoder_phys_cmd_enable_helper;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -750,8 +894,6 @@
 		goto fail_mdp_init;
 	}
 	phys_enc->hw_mdptop = hw_mdp;
-
-	cmd_enc->intf_idx = p->intf_idx;
 	phys_enc->intf_idx = p->intf_idx;
 
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -768,6 +910,7 @@
 		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
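
The new _sde_encoder_phys_cmd_wait_for_ctl_start() above pairs a wait on pending_ctlstart_cnt with a timeout fallback: when the wait expires it re-reads (and clears) the raw CTL_START interrupt status, and if the bit is already set it runs the handler inline instead of failing the frame. A minimal, self-contained sketch of that wait-then-recheck pattern follows; the three helpers are hypothetical stand-ins, not driver APIs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the wait queue, the raw interrupt-status
     * read-and-clear, and the CTL-start interrupt handler. */
    static bool wait_for_count_to_drop(void) { return false; } /* timed out */
    static bool irq_status_pending(void)     { return true;  } /* HW finished */
    static void run_ctl_start_handler(void)  { }

    /* On timeout, re-check the status register: if the hardware already
     * raised the event but the IRQ was never delivered, run the handler
     * inline instead of reporting a failure. */
    static int wait_for_ctl_start(void)
    {
        if (wait_for_count_to_drop())
            return 0;

        if (irq_status_pending()) {
            run_ctl_start_handler();
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        printf("ctl start wait: %d\n", wait_for_ctl_start());
        return 0;
    }
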
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 29f00f7..5cb84b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -227,16 +227,18 @@
 	struct sde_encoder_phys_vid *vid_enc =
 		to_sde_encoder_phys_vid(phys_enc);
 	struct intf_prog_fetch f = { 0 };
-	struct intf_timing_params *timing = &vid_enc->timing_params;
+	struct intf_timing_params *timing;
 	u32 vfp_fetch_lines = 0;
 	u32 horiz_total = 0;
 	u32 vert_total = 0;
 	u32 rot_fetch_start_vsync_counter = 0;
 	unsigned long lock_flags;
 
-	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_rot_start))
+	if (!phys_enc || !vid_enc->hw_intf ||
+			!vid_enc->hw_intf->ops.setup_rot_start)
 		return;
 
+	timing = &vid_enc->timing_params;
 	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
 	if (vfp_fetch_lines && rot_fetch_lines) {
 		vert_total = get_vertical_total(timing);
@@ -389,10 +391,24 @@
 			phys_enc);
 }
 
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+		return true;
+
+	return false;
+}
+
 static bool sde_encoder_phys_vid_needs_single_flush(
 		struct sde_encoder_phys *phys_enc)
 {
-	return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO;
+	return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc);
 }
 
 static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
@@ -678,7 +694,7 @@
 			KICKOFF_TIMEOUT_MS);
 	if (ret <= 0) {
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				INTR_IDX_VSYNC, true);
+				vid_enc->irq_idx[INTR_IDX_VSYNC], true);
 		if (irq_status) {
 			SDE_EVT32(DRMID(phys_enc->parent),
 					vid_enc->hw_intf->idx - INTF_0);
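
The video-path change above narrows needs_single_flush() from "anything that is not solo" to "only ping-pong split", derived from the connector's topology. Per the topology table added in the sde_rm changes further down, PPSPLIT is the one case where two interfaces hang off a single control path, which is why only it needs the combined flush. A tiny illustrative sketch (the enum here is a stand-in for sde_rm_topology_name):

    #include <stdbool.h>
    #include <stdio.h>

    enum topo { TOPO_SINGLEPIPE, TOPO_DUALPIPE, TOPO_PPSPLIT };

    /* Only ping-pong split drives two interfaces from one CTL, so only
     * that case needs both interface flush bits issued together. */
    static bool needs_single_flush(enum topo t)
    {
        return t == TOPO_PPSPLIT;
    }

    int main(void)
    {
        printf("dualpipe: %d, ppsplit: %d\n",
               needs_single_flush(TOPO_DUALPIPE),
               needs_single_flush(TOPO_PPSPLIT));
        return 0;
    }
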
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 46823b6..c83472a 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -16,6 +16,8 @@
 #include "sde_kms.h"
 #include "sde_fence.h"
 
+#define TIMELINE_VAL_LENGTH		128
+
 void *sde_sync_get(uint64_t fd)
 {
 	/* force signed compare, fdget accepts an int argument */
@@ -31,14 +33,31 @@
 signed long sde_sync_wait(void *fnc, long timeout_ms)
 {
 	struct fence *fence = fnc;
+	int rc;
+	char timeline_str[TIMELINE_VAL_LENGTH];
 
 	if (!fence)
 		return -EINVAL;
 	else if (fence_is_signaled(fence))
 		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;
 
-	return fence_wait_timeout(fence, true,
+	rc = fence_wait_timeout(fence, true,
 				msecs_to_jiffies(timeout_ms));
+	if (!rc || (rc == -EINVAL)) {
+		if (fence->ops->timeline_value_str)
+			fence->ops->timeline_value_str(fence,
+					timeline_str, TIMELINE_VAL_LENGTH);
+
+		SDE_ERROR(
+			"fence driver name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
+			fence->ops->get_driver_name(fence),
+			fence->ops->get_timeline_name(fence),
+			fence->seqno, timeline_str,
+			fence->ops->signaled ?
+				fence->ops->signaled(fence) : 0xffffffff);
+	}
+
+	return rc;
 }
 
 uint32_t sde_sync_get_name_prefix(void *fence)
@@ -120,10 +139,9 @@
 	struct sde_fence *f = to_sde_fence(fence);
 	struct sde_fence *fc, *next;
 	struct sde_fence_context *ctx = f->ctx;
-	unsigned long flags;
 	bool release_kref = false;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->list_lock);
 	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
 				 fence_list) {
 		/* fence release called before signal */
@@ -133,7 +151,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock(&ctx->list_lock);
 
 	/* keep kput outside spin_lock because it may release ctx */
 	if (release_kref)
@@ -179,7 +197,6 @@
 	struct sync_file *sync_file;
 	signed int fd = -EINVAL;
 	struct sde_fence_context *ctx = fence_ctx;
-	unsigned long flags;
 
 	if (!ctx) {
 		SDE_ERROR("invalid context\n");
@@ -190,8 +207,9 @@
 	if (!sde_fence)
 		return -ENOMEM;
 
-	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "fence%u", val);
-
+	sde_fence->ctx = fence_ctx;
+	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
+						sde_fence->ctx->name, val);
 	fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
 		ctx->context, val);
 
@@ -214,13 +232,13 @@
 	}
 
 	fd_install(fd, sync_file->file);
-
-	spin_lock_irqsave(&ctx->lock, flags);
-	sde_fence->ctx = fence_ctx;
 	sde_fence->fd = fd;
-	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
 	kref_get(&ctx->kref);
-	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	spin_lock(&ctx->list_lock);
+	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
+	spin_unlock(&ctx->list_lock);
+
 exit:
 	return fd;
 }
@@ -241,6 +259,7 @@
 	ctx->context = fence_context_alloc(1);
 
 	spin_lock_init(&ctx->lock);
+	spin_lock_init(&ctx->list_lock);
 	INIT_LIST_HEAD(&ctx->fence_list_head);
 
 	return 0;
@@ -314,7 +333,8 @@
 {
 	unsigned long flags;
 	struct sde_fence *fc, *next;
-	uint32_t count = 0;
+	bool is_signaled = false;
+	struct list_head local_list_head;
 
 	if (!ctx) {
 		SDE_ERROR("invalid ctx, %pK\n", ctx);
@@ -323,37 +343,45 @@
 		return;
 	}
 
+	INIT_LIST_HEAD(&local_list_head);
+
 	spin_lock_irqsave(&ctx->lock, flags);
 	if ((int)(ctx->done_count - ctx->commit_count) < 0) {
 		++ctx->done_count;
+		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
+					ctx->commit_count, ctx->done_count);
 	} else {
 		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
 					ctx->done_count, ctx->commit_count);
-		goto end;
+		spin_unlock_irqrestore(&ctx->lock, flags);
+		return;
 	}
-
-	if (list_empty(&ctx->fence_list_head)) {
-		SDE_DEBUG("nothing to trigger!-no get_prop call\n");
-		goto end;
-	}
-
-	SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
-					ctx->commit_count, ctx->done_count);
-
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
-				 fence_list) {
-		if (fence_is_signaled_locked(&fc->base)) {
-			list_del_init(&fc->fence_list);
-			count++;
-		}
-	}
+	spin_unlock_irqrestore(&ctx->lock, flags);
 
 	SDE_EVT32(ctx->drm_id, ctx->done_count);
 
-end:
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_lock(&ctx->list_lock);
+	if (list_empty(&ctx->fence_list_head)) {
+		SDE_DEBUG("nothing to trigger!-no get_prop call\n");
+		spin_unlock(&ctx->list_lock);
+		return;
+	}
 
-	/* keep this outside spin_lock because same ctx may be released */
-	for (; count > 0; count--)
-		kref_put(&ctx->kref, sde_fence_destroy);
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list)
+		list_move(&fc->fence_list, &local_list_head);
+	spin_unlock(&ctx->list_lock);
+
+	list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
+		spin_lock_irqsave(&ctx->lock, flags);
+		is_signaled = fence_is_signaled_locked(&fc->base);
+		spin_unlock_irqrestore(&ctx->lock, flags);
+
+		if (is_signaled) {
+			kref_put(&ctx->kref, sde_fence_destroy);
+		} else {
+			spin_lock(&ctx->list_lock);
+			list_move(&fc->fence_list, &ctx->fence_list_head);
+			spin_unlock(&ctx->list_lock);
+		}
+	}
 }
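
The sde_fence_signal() rework above drains the pending fences onto a local list under the new list_lock, drops the lock, and only then checks each fence and drops its context reference, so kref_put() (which may free the context) never runs under a spinlock. A compact userspace sketch of that drain-to-local-list pattern, with made-up types:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int signaled;
    };

    /* Detach the whole pending list while "locked", process it unlocked. */
    static struct node *drain(struct node **head)
    {
        struct node *local = *head;  /* spin_lock(list_lock) would guard this */
        *head = NULL;                /* spin_unlock(list_lock) */
        return local;
    }

    static void signal_all(struct node **head)
    {
        struct node *n = drain(head), *next;

        for (; n; n = next) {
            next = n->next;
            if (n->signaled) {
                free(n);             /* kref_put() may free ctx - stay unlocked */
            } else {
                n->next = *head;     /* re-link, again under list_lock */
                *head = n;
            }
        }
    }

    int main(void)
    {
        struct node *a = calloc(1, sizeof(*a));
        a->signaled = 1;
        struct node *head = a;

        signal_all(&head);
        printf("remaining: %s\n", head ? "yes" : "no");
        return 0;
    }
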
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index f3f8b35..207f29c 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -29,7 +29,8 @@
  * @done_count: Number of completed commits since bootup
  * @drm_id: ID number of owning DRM Object
  * @ref: kref counter on timeline
- * @lock: spinlock for timeline and fence counter protection
+ * @lock: spinlock for fence counter protection
+ * @list_lock: spinlock for fence list protection
  * @context: fence context
  * @list_head: fence list to hold all the fence created on this context
  * @name: name of fence context/timeline
@@ -40,6 +41,7 @@
 	uint32_t drm_id;
 	struct kref kref;
 	spinlock_t lock;
+	spinlock_t list_lock;
 	u64 context;
 	struct list_head fence_list_head;
 	char name[SDE_FENCE_NAME_SIZE];
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index e7f3df7..c3477b5 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -1072,7 +1072,8 @@
 			DRM_ERROR("invalid handle for plane %d\n", i);
 			return -EINVAL;
 		}
-		bos_total_size += bos[i]->size;
+		if ((i == 0) || (bos[i] != bos[0]))
+			bos_total_size += bos[i]->size;
 	}
 
 	if (bos_total_size < layout.total_size) {
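
The sde_formats.c hunk stops double-counting a GEM buffer that backs more than one plane: a plane's size is only added when it is plane 0 or uses a different handle than plane 0, so the comparison against the layout's total size stays meaningful. A small standalone illustration (addresses and sizes are invented):

    #include <stdio.h>

    int main(void)
    {
        /* Two-plane NV12-style layout where both planes share one buffer. */
        const void *bos[2] = { (void *)0x1000, (void *)0x1000 };
        const unsigned long sizes[2] = { 4096, 4096 };
        unsigned long total = 0;

        for (int i = 0; i < 2; i++)
            if (i == 0 || bos[i] != bos[0])  /* skip the shared handle */
                total += sizes[i];

        printf("accounted size: %lu\n", total); /* 4096, not 8192 */
        return 0;
    }
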
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index cfa3b5e..b8ab066 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -120,6 +120,7 @@
 	SRC_SPLIT,
 	DIM_LAYER,
 	SMART_DMA_REV,
+	IDLE_PC,
 	SDE_PROP_MAX,
 };
 
@@ -313,6 +314,7 @@
 	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
 	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
 	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
+	{IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
 };
 
 static struct sde_prop_type sde_perf_prop[] = {
@@ -2214,6 +2216,7 @@
 
 	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
 	cfg->has_dim_layer = PROP_VALUE_ACCESS(prop_value, DIM_LAYER, 0);
+	cfg->has_idle_pc = PROP_VALUE_ACCESS(prop_value, IDLE_PC, 0);
 end:
 	kfree(prop_value);
 	return rc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 97da08f..b5f83ad 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -713,6 +713,7 @@
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
  * @sbuf_headroom      stream buffer headroom in lines
+ * @has_idle_pc        indicate if idle power collapse feature is supported
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
@@ -735,6 +736,7 @@
 	u32 ubwc_version;
 	bool has_sbuf;
 	u32 sbuf_headroom;
+	bool has_idle_pc;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 82f1c09..a62aa6e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -26,6 +26,7 @@
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 #define   CTL_ROT_TOP                   0x0C0
@@ -78,6 +79,11 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
+static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
 static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
 {
 	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
@@ -537,6 +543,7 @@
 	ops->trigger_flush = sde_hw_ctl_trigger_flush;
 	ops->get_flush_register = sde_hw_ctl_get_flush_register;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->trigger_pending = sde_hw_ctl_trigger_pending;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
 	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 7ae43b7..ace05e8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -91,6 +91,14 @@
 	void (*trigger_start)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * indicate that a kickoff prepare is in progress, a hw operation
+	 * for sw controlled interfaces: DSI cmd mode and WB interface
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * kickoff rotator operation for Sw controlled interfaces
 	 * DSI cmd mode and WB interface are SW controlled
 	 * @ctx       : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index f1b9c32..8df4de2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -25,6 +25,9 @@
 {
 	int i;
 
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
 	for (i = 0; i < m->dspp_count; i++) {
 		if (dspp == m->dspp[i].id) {
 			b->base_off = addr;
@@ -43,6 +46,9 @@
 {
 	int i = 0, ret;
 
+	if (!c || !c->cap || !c->cap->sblk)
+		return;
+
 	for (i = 0; i < SDE_DSPP_MAX; i++) {
 		if (!test_bit(i, &features))
 			continue;
@@ -119,6 +125,9 @@
 	struct sde_hw_dspp *c;
 	struct sde_dspp_cfg *cfg;
 
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 6020476..70b3e56 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -192,6 +192,7 @@
  * should be called once before accessing every dspp.
  * @idx:  DSPP index for which driver object is required
  * @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
  */
 struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
 			void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index d5289c0..24f16c6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -31,9 +31,9 @@
 #define MDP_INTF_4_OFF			0x6D000
 #define MDP_AD4_0_OFF			0x7D000
 #define MDP_AD4_1_OFF			0x7E000
-#define MDP_AD4_INTR_EN_OFF 0x41c
-#define MDP_AD4_INTR_CLEAR_OFF 0x424
-#define MDP_AD4_INTR_STATUS_OFF 0x420
+#define MDP_AD4_INTR_EN_OFF		0x41c
+#define MDP_AD4_INTR_CLEAR_OFF		0x424
+#define MDP_AD4_INTR_STATUS_OFF		0x420
 
 /**
  * WB interrupt status bit definitions
@@ -87,7 +87,7 @@
  * Pingpong Secondary interrupt status bit definitions
  */
 #define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
-#define	SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
 #define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
 #define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
 #define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
@@ -109,6 +109,15 @@
 #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
 
 /**
+ * Ctl start interrupt status bit definitions
+ */
+#define SDE_INTR_CTL_0_START BIT(9)
+#define SDE_INTR_CTL_1_START BIT(10)
+#define SDE_INTR_CTL_2_START BIT(11)
+#define SDE_INTR_CTL_3_START BIT(12)
+#define SDE_INTR_CTL_4_START BIT(13)
+
+/**
  * Concurrent WB overflow interrupt status bit definitions
  */
 #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
@@ -325,15 +334,21 @@
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 40-43 */
+	/* irq_idx: 40 */
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
 		SDE_INTR_PING_PONG_S0_RD_PTR, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 44-47 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 41-45 */
+	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
+		SDE_INTR_CTL_0_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
+		SDE_INTR_CTL_1_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
+		SDE_INTR_CTL_2_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
+		SDE_INTR_CTL_3_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
+		SDE_INTR_CTL_4_START, 1},
+	/* irq_idx: 46-47 */
 	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
 	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
 	/* irq_idx: 48-51 */
@@ -696,6 +711,9 @@
 static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
 		uint32_t mask)
 {
+	if (!intr)
+		return;
+
 	SDE_REG_WRITE(&intr->hw, reg_off, mask);
 }
 
@@ -710,6 +728,9 @@
 	u32 irq_status;
 	unsigned long irq_flags;
 
+	if (!intr)
+		return;
+
 	/*
 	 * The dispatcher will save the IRQ status before calling here.
 	 * Now need to go through each IRQ status and find matching
@@ -726,6 +747,10 @@
 		start_idx = reg_idx * 32;
 		end_idx = start_idx + 32;
 
+		if (start_idx >= ARRAY_SIZE(sde_irq_map) ||
+				end_idx > ARRAY_SIZE(sde_irq_map))
+			continue;
+
 		/*
 		 * Search through matching intr status from irq map.
 		 * start_idx and end_idx defined the search range in
@@ -769,6 +794,9 @@
 	const char *dbgstr = NULL;
 	uint32_t cache_irq_mask;
 
+	if (!intr)
+		return -EINVAL;
+
 	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
@@ -810,6 +838,9 @@
 	const char *dbgstr = NULL;
 	uint32_t cache_irq_mask;
 
+	if (!intr)
+		return -EINVAL;
+
 	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
@@ -846,6 +877,9 @@
 {
 	int i;
 
+	if (!intr)
+		return -EINVAL;
+
 	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
 		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
 
@@ -856,6 +890,9 @@
 {
 	int i;
 
+	if (!intr)
+		return -EINVAL;
+
 	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
 		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
 
@@ -865,15 +902,23 @@
 static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
 		uint32_t *mask)
 {
+	if (!intr || !mask)
+		return -EINVAL;
+
 	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
 		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
 	return 0;
 }
 
 static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
 		uint32_t *sources)
 {
+	if (!intr || !sources)
+		return -EINVAL;
+
 	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+
 	return 0;
 }
 
@@ -883,6 +928,9 @@
 	u32 enable_mask;
 	unsigned long irq_flags;
 
+	if (!intr)
+		return;
+
 	spin_lock_irqsave(&intr->status_lock, irq_flags);
 	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
 		/* Read interrupt status */
@@ -909,6 +957,9 @@
 	int reg_idx;
 	unsigned long irq_flags;
 
+	if (!intr)
+		return;
+
 	spin_lock_irqsave(&intr->mask_lock, irq_flags);
 
 	reg_idx = sde_irq_map[irq_idx].reg_idx;
@@ -925,6 +976,9 @@
 	unsigned long irq_flags;
 	u32 intr_status;
 
+	if (!intr)
+		return 0;
+
 	spin_lock_irqsave(&intr->mask_lock, irq_flags);
 
 	reg_idx = sde_irq_map[irq_idx].reg_idx;
@@ -959,7 +1013,7 @@
 static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
 		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
 {
-	if (m->mdp_count == 0)
+	if (!m || !addr || !hw || m->mdp_count == 0)
 		return NULL;
 
 	hw->base_off = addr;
@@ -971,9 +1025,13 @@
 struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
 		struct sde_mdss_cfg *m)
 {
-	struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	struct sde_hw_intr *intr;
 	struct sde_mdss_base_cfg *cfg;
 
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
 	if (!intr)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index 7805df1..aaba1be 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -25,7 +25,7 @@
 #define IRQ_SOURCE_DSI1		BIT(5)
 #define IRQ_SOURCE_HDMI		BIT(8)
 #define IRQ_SOURCE_EDP		BIT(12)
-#define	IRQ_SOURCE_MHL		BIT(16)
+#define IRQ_SOURCE_MHL		BIT(16)
 
 /**
  * sde_intr_type - HW Interrupt Type
@@ -55,6 +55,7 @@
  * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
  * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
  * @SDE_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
+ * @SDE_IRQ_TYPE_CTL_START:		Control start
  * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion
  */
 enum sde_intr_type {
@@ -84,6 +85,7 @@
 	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
 	SDE_IRQ_TYPE_PROG_LINE,
 	SDE_IRQ_TYPE_AD4_BL_DONE,
+	SDE_IRQ_TYPE_CTL_START,
 	SDE_IRQ_TYPE_RESERVED,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 01fe3c8..d15b804 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -574,9 +574,16 @@
 
 	rot_cmd.video_mode = data->video_mode;
 	rot_cmd.fps = data->fps;
+
+	/*
+	 * DRM rotation property is specified in counter clockwise direction
+	 * whereas rotator h/w rotates in clockwise direction.
+	 * Convert rotation property to clockwise 90 by toggling h/v flip
+	 */
 	rot_cmd.rot90 = data->rot90;
-	rot_cmd.hflip = data->hflip;
-	rot_cmd.vflip = data->vflip;
+	rot_cmd.hflip = data->rot90 ? !data->hflip : data->hflip;
+	rot_cmd.vflip = data->rot90 ? !data->vflip : data->vflip;
+
 	rot_cmd.secure = data->secure;
 	rot_cmd.clkrate = data->clkrate;
 	rot_cmd.data_bw = 0;
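
The rotator hunk converts the DRM rotation property, which is specified counter-clockwise, for hardware that only rotates clockwise: toggling both flips adds a 180° turn, and CW 90° plus 180° is the requested CCW 90°. A standalone check of that mapping:

    #include <stdbool.h>
    #include <stdio.h>

    struct rot { bool rot90, hflip, vflip; };

    /* CCW 90 == CW 90 + hflip + vflip (the two flips together are a 180
     * degree turn), so invert both flips whenever rot90 is requested. */
    static struct rot to_clockwise(struct rot in)
    {
        struct rot out = in;

        if (in.rot90) {
            out.hflip = !in.hflip;
            out.vflip = !in.vflip;
        }
        return out;
    }

    int main(void)
    {
        struct rot r = to_clockwise((struct rot){ .rot90 = true });

        printf("rot90=%d hflip=%d vflip=%d\n", r.rot90, r.hflip, r.vflip);
        return 0;
    }
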
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b20b3bc..bd212e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -34,18 +34,31 @@
 #define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
 #define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
 
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
 #define DCE_SEL                           0x450
 
 static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
 		struct split_pipe_cfg *cfg)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 upper_pipe = 0;
 	u32 lower_pipe = 0;
 
 	if (!mdp || !cfg)
 		return;
 
+	c = &mdp->hw;
+
 	if (cfg->en) {
 		if (cfg->mode == INTF_MODE_CMD) {
 			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
@@ -107,9 +120,14 @@
 static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
 		struct cdm_output_cfg *cfg)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 out_ctl = 0;
 
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
 	if (cfg->wb_en)
 		out_ctl |= BIT(24);
 	else if (cfg->intf_en)
@@ -121,11 +139,16 @@
 static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
 		enum sde_clk_ctrl_type clk_ctrl, bool enable)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 reg_off, bit_off;
 	u32 reg_val, new_val;
 	bool clk_forced_on;
 
+	if (!mdp)
+		return false;
+
+	c = &mdp->hw;
+
 	if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
 		return false;
 
@@ -150,9 +173,14 @@
 static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
 		struct sde_danger_safe_status *status)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 value;
 
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
 	value = SDE_REG_READ(c, DANGER_STATUS);
 	status->mdp = (value >> 0) & 0x3;
 	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
@@ -175,12 +203,50 @@
 	status->wb[WB_3] = 0;
 }
 
+static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
+		struct sde_watchdog_te_status *cfg, bool watchdog_te)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg = 0;
+	int i = 0;
+	u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
+
+	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
+	for (i = 0; i < cfg->pp_count; i++) {
+		if (watchdog_te)
+			reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+		else
+			reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+	}
+
+	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	if (watchdog_te) {
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+				CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
+		reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+	}
+}
+
 static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
 		struct sde_danger_safe_status *status)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 value;
 
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
 	value = SDE_REG_READ(c, SAFE_STATUS);
 	status->mdp = (value >> 0) & 0x1;
 	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
@@ -205,11 +271,32 @@
 
 static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
 {
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	struct sde_hw_blk_reg_map *c;
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
 
 	SDE_REG_WRITE(c, DCE_SEL, dce_sel);
 }
 
+void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
+{
+	struct sde_hw_blk_reg_map c;
+
+	if (!mdp || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
+		return;
+
+	/* force blk offset to zero to access beginning of register region */
+	c = mdp->hw;
+	c.blk_off = 0x0;
+	SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
 static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 		unsigned long cap)
 {
@@ -218,8 +305,10 @@
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
 	ops->get_safe_status = sde_hw_get_safe_status;
 	ops->setup_dce = sde_hw_setup_dce;
+	ops->reset_ubwc = sde_hw_reset_ubwc;
 }
 
 static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
@@ -229,6 +318,9 @@
 {
 	int i;
 
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
 	for (i = 0; i < m->mdp_count; i++) {
 		if (mdp == m->mdp[i].id) {
 			b->base_off = addr;
@@ -243,25 +335,6 @@
 	return ERR_PTR(-EINVAL);
 }
 
-static inline void _sde_hw_mdptop_init_ubwc(void __iomem *addr,
-		const struct sde_mdss_cfg *m)
-{
-	struct sde_hw_blk_reg_map hw;
-
-	if (!addr || !m)
-		return;
-
-	if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
-		return;
-
-	memset(&hw, 0, sizeof(hw));
-	hw.base_off = addr;
-	hw.blk_off = 0x0;
-	hw.hwversion = m->hwversion;
-	hw.log_mask = SDE_DBG_MASK_TOP;
-	SDE_REG_WRITE(&hw, UBWC_STATIC, m->mdp[0].ubwc_static);
-}
-
 struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
 		void __iomem *addr,
 		const struct sde_mdss_cfg *m)
@@ -294,8 +367,6 @@
 			mdp->hw.xin_id);
 	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
 
-	_sde_hw_mdptop_init_ubwc(addr, m);
-
 	return mdp;
 }
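
CALCULATE_WD_LOAD_VALUE() above appears to assume the watchdog timer ticks at XO/16 (19 200 kHz / 16 = 1 200 kHz), so the load value is simply the number of those ticks in one frame; at 60 fps that is 1 200 000 / 60 = 20 000. A tiny program mirroring the macro (parentheses around fps added here for safety):

    #include <stdint.h>
    #include <stdio.h>

    #define MDP_TICK_COUNT  16
    #define XO_CLK_RATE     19200   /* kHz */
    #define MS_TICKS_IN_SEC 1000

    #define CALCULATE_WD_LOAD_VALUE(fps) \
        ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))

    int main(void)
    {
        /* 19,200,000 / 960 = 20000 timer ticks per 60 Hz frame. */
        printf("60 fps load value: %u\n", CALCULATE_WD_LOAD_VALUE(60));
        return 0;
    }
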
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 9cb0c55..9cb4494 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -77,6 +77,18 @@
 };
 
 /**
+ * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong block numbers for the active ping pongs
+ */
+struct sde_watchdog_te_status {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[];
+};
+
+/**
  * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
  * Assumption is these functions will be called after clocks are enabled.
  * @setup_split_pipe : Programs the pipe control registers
@@ -142,12 +154,28 @@
 			struct sde_danger_safe_status *status);
 
 	/**
+	 * setup_vsync_sel - set up vsync source and watchdog timer configuration
+	 * @mdp: mdp top context driver
+	 * @cfg: watchdog timer configuration
+	 * @watchdog_te: watchdog timer enable
+	 */
+	void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
+			struct sde_watchdog_te_status *cfg, bool watchdog_te);
+
+	/**
 	 * get_safe_status - get safe status
 	 * @mdp: mdp top context driver
 	 * @status: Pointer to danger safe status
 	 */
 	void (*get_safe_status)(struct sde_hw_mdp *mdp,
 			struct sde_danger_safe_status *status);
+
+	/**
+	 * reset_ubwc - reset top level UBWC configuration
+	 * @mdp: mdp top context driver
+	 * @m: pointer to mdss catalog data
+	 */
+	void (*reset_ubwc)(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m);
 };
 
 struct sde_hw_mdp {
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 4a5479d..b68d736 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -567,7 +567,8 @@
 		.set_backlight = dsi_display_set_backlight,
 		.soft_reset   = dsi_display_soft_reset,
 		.pre_kickoff  = dsi_conn_pre_kickoff,
-		.clk_ctrl = dsi_display_clk_ctrl
+		.clk_ctrl = dsi_display_clk_ctrl,
+		.get_topology = dsi_conn_get_topology
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -575,7 +576,8 @@
 		.get_modes =    sde_wb_connector_get_modes,
 		.set_property = sde_wb_connector_set_property,
 		.get_info =     sde_wb_get_info,
-		.soft_reset =   NULL
+		.soft_reset =   NULL,
+		.get_topology = sde_wb_get_topology
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 8662207..c408861 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -75,12 +75,6 @@
 
 #define TX_MODE_BUFFER_LINE_THRES 2
 
-/* dirty bits for update function */
-#define SDE_PLANE_DIRTY_RECTS	0x1
-#define SDE_PLANE_DIRTY_FORMAT	0x2
-#define SDE_PLANE_DIRTY_SHARPEN	0x4
-#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
-
 #define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
 #define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
 
@@ -107,7 +101,8 @@
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
  * @catalog: Points to sde catalog structure
  * @sbuf_mode: force stream buffer mode if set
- * @sbuf_writeback: fource stream buffer writeback if set
+ * @sbuf_writeback: force stream buffer writeback if set
+ * @revalidate: force revalidation of all the plane properties
  * @blob_rot_caps: Pointer to rotator capability blob
  */
 struct sde_plane {
@@ -134,6 +129,7 @@
 	struct sde_mdss_cfg *catalog;
 	u32 sbuf_mode;
 	u32 sbuf_writeback;
+	bool revalidate;
 
 	struct sde_hw_pixel_ext pixel_ext;
 	bool pixel_ext_usr;
@@ -499,6 +495,17 @@
 			&psde->pipe_qos_cfg);
 }
 
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable)
+{
+	struct sde_plane *psde;
+
+	if (!plane)
+		return;
+
+	psde = to_sde_plane(plane);
+	psde->revalidate = enable;
+}
+
 int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
 {
 	struct sde_plane *psde;
@@ -1961,21 +1968,29 @@
 		ret = sde_plane_rot_submit_command(plane, state,
 				SDE_HW_ROT_CMD_VALIDATE);
 
-	} else if (sde_plane_enabled(state)) {
+	} else {
 
 		SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
 				rstate->sequence_id);
 
 		/* bypass rotator - initialize output setting as input */
+		for (i = 0; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
+			rstate->out_fb_modifier[i] = state->fb ?
+				state->fb->modifier[i] : 0x0;
+
+		if (state->fb) {
+			rstate->out_fb_pixel_format = state->fb->pixel_format;
+			rstate->out_fb_flags = state->fb->flags;
+			rstate->out_fb_width = state->fb->width;
+			rstate->out_fb_height = state->fb->height;
+		} else {
+			rstate->out_fb_pixel_format = 0x0;
+			rstate->out_fb_flags = 0x0;
+			rstate->out_fb_width = 0;
+			rstate->out_fb_height = 0;
+		}
+
 		rstate->out_rotation = rstate->in_rotation;
-		rstate->out_fb_pixel_format = state->fb->pixel_format;
-
-		for (i = 0.; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
-			rstate->out_fb_modifier[i] = state->fb->modifier[i];
-
-		rstate->out_fb_flags = state->fb->flags;
-		rstate->out_fb_width = state->fb->width;
-		rstate->out_fb_height = state->fb->height;
 		rstate->out_src_x = state->src_x;
 		rstate->out_src_y = state->src_y;
 		rstate->out_src_w = state->src_w;
@@ -2242,6 +2257,14 @@
 			state->crtc_w, state->crtc_h,
 			state->crtc_x, state->crtc_y);
 
+	/* force reprogramming of all the parameters, if the flag is set */
+	if (psde->revalidate) {
+		SDE_DEBUG("plane:%d - reconfigure all the parameters\n",
+				plane->base.id);
+		pstate->dirty = SDE_PLANE_DIRTY_ALL;
+		psde->revalidate = false;
+	}
+
 	/* determine what needs to be refreshed */
 	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
 		switch (idx) {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e955f41..ac70542 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -91,6 +91,12 @@
 	int out_xpos;
 };
 
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS	0x1
+#define SDE_PLANE_DIRTY_FORMAT	0x2
+#define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
+
 /**
  * struct sde_plane_state: Define sde extension of drm plane state object
  * @base:	base drm plane state object
@@ -222,4 +228,12 @@
 int sde_plane_color_fill(struct drm_plane *plane,
 		uint32_t color, uint32_t alpha);
 
+/**
+ * sde_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void sde_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
 #endif /* _SDE_PLANE_H_ */
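
sde_plane_set_revalidate() only latches a flag; the sde_plane.c change consumes it in the next atomic check by widening the dirty mask to SDE_PLANE_DIRTY_ALL and clearing the flag. A standalone sketch of that one-shot flow (types and names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define DIRTY_ALL 0xFFFFFFFFu

    struct plane { bool revalidate; unsigned int dirty; };

    /* set_revalidate() latches the request; atomic_check() consumes it
     * once and forces every property to be reprogrammed. */
    static void set_revalidate(struct plane *p, bool en) { p->revalidate = en; }

    static void atomic_check(struct plane *p)
    {
        if (p->revalidate) {
            p->dirty = DIRTY_ALL;
            p->revalidate = false;   /* one-shot */
        }
    }

    int main(void)
    {
        struct plane p = { 0 };

        set_revalidate(&p, true);
        atomic_check(&p);
        printf("dirty mask: 0x%x\n", p.dirty);
        return 0;
    }
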
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 66318b3..427a93b 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -32,22 +32,40 @@
 #define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
 #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
 #define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
-#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
-#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+struct sde_rm_topology_def {
+	enum sde_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+static const struct sde_rm_topology_def g_top_table[] = {
+	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
+};
 
 /**
  * struct sde_rm_requirements - Reservation requirements parameter bundle
- * @top_name:	DRM<->HW topology use case user is trying to enable
- * @dspp:	Whether the user requires a DSPP
- * @num_lm:	Number of layer mixers needed in the use case
- * @hw_res:	Hardware resources required as reported by the encoders
+ * @top_ctrl:  topology control preference from kernel client
+ * @topology:  selected topology for the display
+ * @hw_res:    Hardware resources required as reported by the encoders
  */
 struct sde_rm_requirements {
-	enum sde_rm_topology_name top_name;
 	uint64_t top_ctrl;
-	int num_lm;
-	int num_ctl;
-	bool needs_split_display;
+	const struct sde_rm_topology_def *topology;
 	struct sde_encoder_hw_resources hw_res;
 };
 
@@ -607,7 +625,7 @@
 	}
 
 	pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
-	if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
 			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
 		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
 		*dspp = NULL;
@@ -630,14 +648,15 @@
 	int lm_count = 0;
 	int i, rc = 0;
 
-	if (!reqs->num_lm) {
-		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+	if (!reqs->topology->num_lm) {
+		SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
 		return -EINVAL;
 	}
 
 	/* Find a primary mixer */
 	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
-	while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+	while (lm_count != reqs->topology->num_lm &&
+			sde_rm_get_hw(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
 		memset(&pp, 0, sizeof(pp));
@@ -655,7 +674,8 @@
 		/* Valid primary mixer found, find matching peers */
 		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
 
-		while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+		while (lm_count != reqs->topology->num_lm &&
+				sde_rm_get_hw(rm, &iter_j)) {
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
@@ -669,7 +689,7 @@
 		}
 	}
 
-	if (lm_count != reqs->num_lm) {
+	if (lm_count != reqs->topology->num_lm) {
 		SDE_DEBUG("unable to find appropriate mixers\n");
 		return -ENAVAIL;
 	}
@@ -687,7 +707,7 @@
 				dspp[i] ? dspp[i]->id : 0);
 	}
 
-	if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
 		/* reserve a free PINGPONG_SLAVE block */
 		rc = -ENAVAIL;
 		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
@@ -713,7 +733,7 @@
 static int _sde_rm_reserve_ctls(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter;
@@ -735,23 +755,23 @@
 
 		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
 
-		if (reqs->needs_split_display != has_split_display)
+		if (top->needs_split_display != has_split_display)
 			continue;
 
-		if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+		if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
 			continue;
 
 		ctls[i] = iter.blk;
 		SDE_DEBUG("ctl %d match\n", iter.blk->id);
 
-		if (++i == reqs->num_ctl)
+		if (++i == top->num_ctl)
 			break;
 	}
 
-	if (i != reqs->num_ctl)
+	if (i != top->num_ctl)
 		return -ENAVAIL;
 
-	for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
 		ctls[i]->rsvp_nxt = rsvp;
 		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
 	}
@@ -762,13 +782,13 @@
 static int _sde_rm_reserve_dsc(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_iter iter;
 	int alloc_count = 0;
-	int num_dsc_enc = reqs->num_lm;
+	int num_dsc_enc = top->num_lm;
 
-	if (!reqs->hw_res.needs_dsc)
+	if (!top->num_comp_enc)
 		return 0;
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
@@ -912,11 +932,12 @@
 		struct sde_rm_requirements *reqs)
 {
 	int ret;
+	struct sde_rm_topology_def topology;
 
 	/* Create reservation info, tag reserved blocks with it as we go */
 	rsvp->seq = ++rm->rsvp_next_seq;
 	rsvp->enc_id = enc->base.id;
-	rsvp->topology = reqs->top_name;
+	rsvp->topology = reqs->topology->top_name;
 	list_add_tail(&rsvp->list, &rm->rsvps);
 
 	/*
@@ -941,10 +962,11 @@
 	 * - Check mixers without Split Display
 	 * - Only then allow to grab from CTLs with split display capability
 	 */
-	_sde_rm_reserve_ctls(rm, rsvp, reqs);
-	if (ret && !reqs->needs_split_display) {
-		reqs->needs_split_display = true;
-		_sde_rm_reserve_ctls(rm, rsvp, reqs);
+	_sde_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		_sde_rm_reserve_ctls(rm, rsvp, &topology);
 	}
 	if (ret) {
 		SDE_ERROR("unable to find appropriate CTL\n");
@@ -956,7 +978,7 @@
 	if (ret)
 		return ret;
 
-	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs);
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology);
 	if (ret)
 		return ret;
 
@@ -971,37 +993,7 @@
 		struct sde_rm_requirements *reqs)
 {
 	const struct drm_display_mode *mode = &crtc_state->mode;
-
-	/**
-	 * DRM<->HW Topologies
-	 *
-	 * Name: SINGLEPIPE
-	 * Description: 1 LM, 1 PP, 1 INTF
-	 * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
-	 *
-	 * Name: DUALPIPE
-	 * Description: 2 LM, 2 PP, 2 INTF
-	 * Condition: 1 DRM Encoder w/ 2 Display Tiles
-	 *
-	 * Name: PPSPLIT
-	 * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 2 Display Tiles
-	 *	topology_control & SDE_TOPREQ_PPSPLIT
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width >= layer_mixer.max_width
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width <= layer_mixer.max_width
-	 *	topology_control & SDE_TOPREQ_FORCE_TILING
-	 */
+	int i;
 
 	memset(reqs, 0, sizeof(*reqs));
 
@@ -1009,63 +1001,32 @@
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
 	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
 
-	/* DSC blocks are hardwired for control path 0 and 1 */
-	if (reqs->hw_res.needs_dsc)
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-
-	/* Base assumption is LMs = h_tiles, conditions below may override */
-	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
-
-	if (reqs->num_lm == 2) {
-		if (RM_RQ_PPSPLIT(reqs)) {
-			/* user requests serving dual display with 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
-			reqs->num_lm = 1;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = true;
-		} else {
-			/* dual display, serve with 2 lms */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
-			reqs->num_ctl = 2;
-			reqs->needs_split_display = true;
+	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					reqs->hw_res.topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
 		}
+	}
 
-	} else if (reqs->num_lm == 1) {
-		if (mode->hdisplay > rm->lm_max_width) {
-			/* wide display, must split across 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else if (RM_RQ_FORCE_TILING(reqs)) {
-			/* thin display, but user requests 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else {
-			/* thin display, serve with only 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		}
-
-	} else {
-		/* Currently no configurations with # LM > 2 */
-		SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+	if (!reqs->topology) {
+		SDE_ERROR("invalid topology for the display\n");
 		return -EINVAL;
 	}
 
-	SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+	/* DSC blocks are hardwired for control path 0 and 1 */
+	if (reqs->topology->num_comp_enc)
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+
+	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
 			reqs->hw_res.display_num_of_h_tiles);
-	SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
-			mode->hdisplay, rm->lm_max_width);
-	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
-			reqs->num_ctl, reqs->top_name);
-	SDE_DEBUG("num_lm %d topology_name %d\n", reqs->num_lm,
-			reqs->top_name);
-	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
-			reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+			reqs->topology->num_lm, reqs->topology->num_ctl,
+			reqs->topology->top_name,
+			reqs->topology->needs_split_display);
+	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->topology->num_lm,
+			reqs->top_ctrl, reqs->topology->top_name,
+			reqs->topology->num_ctl);
 
 	return 0;
 }
@@ -1189,7 +1150,7 @@
 				sde_connector_get_propinfo(conn),
 				sde_connector_get_property_values(conn->state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 }
 
@@ -1233,17 +1194,6 @@
 	return ret;
 }
 
-int sde_rm_check_property_topctl(uint64_t val)
-{
-	if ((BIT(SDE_RM_TOPCTL_FORCE_TILING) & val) &&
-			(BIT(SDE_RM_TOPCTL_PPSPLIT) & val)) {
-		SDE_ERROR("ppsplit & force_tiling are incompatible\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 int sde_rm_reserve(
 		struct sde_rm *rm,
 		struct drm_encoder *enc,
@@ -1310,7 +1260,7 @@
 						conn_state->connector),
 				sde_connector_get_property_values(conn_state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 4127bc2..059952a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -22,18 +22,27 @@
 
 /**
  * enum sde_rm_topology_name - HW resource use case in use by connector
- * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
- * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:       1 LM, 1 DSC, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSC:         2 LM, 2 DSC, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC: 2 LM, 2 PP, 3DMux, 1 DSC, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:    2 LM, 2 PP, 2 DSC Merge, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT:              1 LM, 2 PPs, 2 INTF/WB
  */
 enum sde_rm_topology_name {
-	SDE_RM_TOPOLOGY_UNKNOWN = 0,
+	SDE_RM_TOPOLOGY_NONE = 0,
 	SDE_RM_TOPOLOGY_SINGLEPIPE,
+	SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,
 	SDE_RM_TOPOLOGY_DUALPIPE,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,
 	SDE_RM_TOPOLOGY_PPSPLIT,
-	SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+	SDE_RM_TOPOLOGY_MAX,
 };
 
 /**
@@ -47,18 +56,11 @@
  *                               Normal behavior would not impact the
  *                               reservation list during the AtomicTest phase.
  * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
- * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
- *                              mixers, despite width fitting within capability
- *                              of a single layer mixer.
- * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
- *                         configuration instead of dual pipe.
  */
 enum sde_rm_topology_control {
 	SDE_RM_TOPCTL_RESERVE_LOCK,
 	SDE_RM_TOPCTL_RESERVE_CLEAR,
 	SDE_RM_TOPCTL_DSPP,
-	SDE_RM_TOPCTL_FORCE_TILING,
-	SDE_RM_TOPCTL_PPSPLIT,
 };
 
 /**
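
Reservation requirements are now resolved by matching the encoder's requested (num_lm, num_comp_enc, num_intf) triple against the fixed g_top_table instead of deriving a topology from control flags. A standalone sketch of that lookup, with the counts copied from a few rows of the table:

    #include <stdbool.h>
    #include <stdio.h>

    struct topo { const char *name; int lm, enc, intf; };

    static const struct topo table[] = {
        { "SINGLEPIPE",       1, 0, 1 },
        { "DUALPIPE",         2, 0, 2 },
        { "DUALPIPE_3DMERGE", 2, 0, 1 },
        { "PPSPLIT",          1, 0, 2 },
    };

    /* Same predicate as RM_IS_TOPOLOGY_MATCH: all three counts must agree. */
    static const struct topo *match(int lm, int enc, int intf)
    {
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].lm == lm && table[i].enc == enc &&
                table[i].intf == intf)
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        const struct topo *t = match(2, 0, 1);

        printf("matched: %s\n", t ? t->name : "none");
        return 0;
    }
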
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 2220925..b2665be 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -286,6 +286,27 @@
 	return 0;
 }
 
+int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
+	struct msm_display_topology *topology, u32 max_mixer_width)
+{
+	const u32 dual_lm = 2;
+	const u32 single_lm = 1;
+	const u32 single_intf = 1;
+	const u32 no_enc = 0;
+
+	if (!drm_mode || !topology || !max_mixer_width) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
+							dual_lm : single_lm;
+	topology->num_enc = no_enc;
+	topology->num_intf = single_intf;
+
+	return 0;
+}
+
 int sde_wb_connector_post_init(struct drm_connector *connector,
 		void *info,
 		void *display)
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index 4e33595..205ff24 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
  * @wb_lock		Serialization lock for writeback context structure
  * @connector:		Connector associated with writeback device
  * @encoder:		Encoder associated with writeback device
+ * @max_mixer_width:    Max width supported by SDE LM HW block
  * @count_modes:	Length of writeback connector modes array
  * @modes:		Writeback connector modes array
  */
@@ -49,6 +50,8 @@
 	struct drm_encoder *encoder;
 
 	enum drm_connector_status detect_status;
+	u32 max_mixer_width;
+
 	u32 count_modes;
 	struct drm_mode_modeinfo *modes;
 };
@@ -183,6 +186,17 @@
 int sde_wb_get_info(struct msm_display_info *info, void *display);
 
 /**
+ * sde_wb_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: zero on success
+ */
+int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
+		struct msm_display_topology *topology,
+		u32 max_mixer_width);
+
+/**
  * sde_wb_connector_get_wb - retrieve writeback device of the given connector
  * @connector: Pointer to drm connector
  * Returns: Pointer to writeback device on success; NULL otherwise
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 3618479..1e4f6b1 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -841,7 +841,15 @@
 			goto data_bus_hdl_err;
 		}
 
-		if (!phandle->rsc_client_init) {
+		/*
+		 * - When the target is RSCC enabled, regulator should
+		 *   be enabled by the s/w only for the first time during
+		 *   bootup. After that, RSCC hardware takes care of enabling/
+		 *   disabling it.
+		 * - When the target is not RSCC enabled, regulator should
+		 *   be totally handled by the software.
+		 */
+		if (!phandle->rsc_client) {
 			rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
 									enable);
 			if (rc) {
@@ -883,7 +891,7 @@
 		sde_power_reg_bus_update(phandle->reg_bus_hdl,
 							max_usecase_ndx);
 
-		if (!phandle->rsc_client_init)
+		if (!phandle->rsc_client)
 			msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
 									enable);
 		sde_power_data_bus_update(&phandle->data_bus_handle, enable);
@@ -901,7 +909,7 @@
 rsc_err:
 	sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
 reg_bus_hdl_err:
-	if (!phandle->rsc_client_init)
+	if (!phandle->rsc_client)
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
 	sde_power_data_bus_update(&phandle->data_bus_handle, 0);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index b26ef9f..d753f0a 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	64000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	64000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index d762904..3413ee7 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -28,16 +28,19 @@
 #include <drm/drmP.h>
 #include <drm/drm_irq.h>
 #include "sde_rsc_priv.h"
+#include "sde_dbg.h"
 
-/* this time is ~0.02ms */
-#define RSC_BACKOFF_TIME_NS		 20000
+/* worst case time to execute one TCS vote (sleep/wake) - ~1ms */
+#define TCS_CASE_EXECUTION_TIME				1064000
 
-/* next two values should be same based on doc */
+/* this time is ~1ms - only wake tcs in any mode */
+#define RSC_BACKOFF_TIME_NS		 (TCS_CASE_EXECUTION_TIME + 100)
 
-/* this time is ~0.2ms */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS	200000
-/* this time is ~0.2ms */
-#define RSC_TIME_SLOT_0_NS		200000
+/* this time is ~1ms - only wake TCS in mode-0 */
+#define RSC_MODE_THRESHOLD_TIME_IN_NS	((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+
+/* this time is ~2ms - sleep + wake TCS in mode-1 */
+#define RSC_TIME_SLOT_0_NS		((TCS_CASE_EXECUTION_TIME * 2) + 100)
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER		5
@@ -74,6 +77,7 @@
 {
 	struct sde_rsc_client *client;
 	struct sde_rsc_priv *rsc;
+	static int id;
 
 	if (!client_name) {
 		pr_err("client name is null- not supported\n");
@@ -83,7 +87,7 @@
 		return ERR_PTR(-EINVAL);
 	} else if (!rsc_prv_list[rsc_index]) {
 		pr_err("rsc not probed yet or not available\n");
-		return ERR_PTR(-EINVAL);
+		return NULL;
 	}
 
 	rsc = rsc_prv_list[rsc_index];
@@ -95,12 +99,14 @@
 	strlcpy(client->name, client_name, MAX_RSC_CLIENT_NAME_LEN);
 	client->current_state = SDE_RSC_IDLE_STATE;
 	client->rsc_index = rsc_index;
+	client->id = id;
 	if (is_primary_client)
 		rsc->primary_client = client;
 	pr_debug("client %s rsc index:%d primary:%d\n", client_name,
 						rsc_index, is_primary_client);
 
 	list_add(&client->list, &rsc->client_list);
+	id++;
 	mutex_unlock(&rsc->client_lock);
 
 	return client;
@@ -381,6 +387,8 @@
 
 	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+		if (!rc)
+			rpmh_mode_solver_set(rsc->disp_rsc, false);
 	}
 
 	return rc;
@@ -413,8 +421,11 @@
 		if (client->current_state == SDE_RSC_VID_STATE)
 			goto end;
 
-	if (rsc->hw_ops.state_update)
+	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+		if (!rc)
+			rpmh_mode_solver_set(rsc->disp_rsc, true);
+	}
 
 	/* wait for vsync for vid to cmd state switch */
 	if (!rc && (rsc->current_state == SDE_RSC_VID_STATE))
@@ -434,8 +445,11 @@
 		    (client->current_state == SDE_RSC_CMD_STATE))
 			goto end;
 
-	if (rsc->hw_ops.state_update)
+	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
+		if (!rc)
+			rpmh_mode_solver_set(rsc->disp_rsc, false);
+	}
 
 	/* wait for vsync for cmd to clk state switch */
 	if (!rc && rsc->primary_client &&
@@ -457,8 +471,11 @@
 		sde_rsc_timer_calculate(rsc, config);
 
 	/* video state switch should be done immediately */
-	if (rsc->hw_ops.state_update)
+	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
+		if (!rc)
+			rpmh_mode_solver_set(rsc->disp_rsc, false);
+	}
 
 	/* wait for vsync for cmd to vid state switch */
 	if (!rc && rsc->primary_client &&
@@ -502,6 +519,8 @@
 		return -EINVAL;
 
 	mutex_lock(&rsc->client_lock);
+	SDE_EVT32(caller_client->id, caller_client->current_state,
+			state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
 	caller_client->crtc_id = crtc_id;
 	caller_client->current_state = state;
 
@@ -559,14 +578,20 @@
 
 	if (rc == STATE_UPDATE_NOT_ALLOWED) {
 		rc = 0;
+		SDE_EVT32(caller_client->id, caller_client->current_state,
+			state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE1);
 		goto clk_disable;
 	} else if (rc) {
 		pr_debug("state:%d update failed rc:%d\n", state, rc);
+		SDE_EVT32(caller_client->id, caller_client->current_state,
+			state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE2);
 		goto clk_disable;
 	}
 
 	pr_debug("state switch successfully complete: %d\n", state);
 	rsc->current_state = state;
+	SDE_EVT32(caller_client->id, caller_client->current_state,
+			state, rsc->current_state, SDE_EVTLOG_FUNC_EXIT);
 
 clk_disable:
 	if (rsc->current_state == SDE_RSC_IDLE_STATE)
@@ -755,14 +780,15 @@
 			const char __user *p, size_t count, loff_t *ppos)
 {
 	struct sde_rsc_priv *rsc = file->private_data;
-	char *input, *mode;
-	u32 mode0_state = 0, mode1_state = 0, mode2_state = 0;
+	char *input;
+	u32 mode_state = 0;
 	int rc;
 
-	if (!rsc || !rsc->hw_ops.mode_ctrl)
+	if (!rsc || !rsc->hw_ops.mode_ctrl || !count ||
+					count > MAX_COUNT_SIZE_SUPPORTED)
 		return 0;
 
-	input = kmalloc(count, GFP_KERNEL);
+	input = kmalloc(count + 1, GFP_KERNEL);
 	if (!input)
 		return -ENOMEM;
 
@@ -770,43 +796,35 @@
 		kfree(input);
 		return -EFAULT;
 	}
-	input[count - 1] = '\0';
+	input[count] = '\0';
+
+	rc = kstrtoint(input, 0, &mode_state);
+	if (rc) {
+		pr_err("mode_state: int conversion failed rc:%d\n", rc);
+		goto end;
+	}
+
+	pr_debug("mode_state: %d\n", mode_state);
+	mode_state &= 0x7;
+	if (mode_state != ALL_MODES_DISABLED &&
+			mode_state != ALL_MODES_ENABLED &&
+			mode_state != ONLY_MODE_0_ENABLED &&
+			mode_state != ONLY_MODE_0_1_ENABLED) {
+		pr_err("invalid mode:%d combination\n", mode_state);
+		goto end;
+	}
 
 	mutex_lock(&rsc->client_lock);
 	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 	if (rc)
 		goto clk_enable_fail;
 
-	mode = strnstr(input, "mode0=", strlen("mode0="));
-	if (mode) {
-		mode0_state = mode[0] - '0';
-		mode0_state &= BIT(0);
-		rsc->hw_ops.mode_ctrl(rsc, MODE0_UPDATE, NULL, 0, mode0_state);
-		goto end;
-	}
-
-	mode = strnstr(input, "mode1=", strlen("mode1="));
-	if (mode) {
-		mode1_state = mode[0] - '0';
-		mode1_state &= BIT(0);
-		rsc->hw_ops.mode_ctrl(rsc, MODE1_UPDATE, NULL, 0, mode1_state);
-		goto end;
-	}
-
-	mode = strnstr(input, "mode2=", strlen("mode2="));
-	if (mode) {
-		mode2_state = mode[0] - '0';
-		mode2_state &= BIT(0);
-		rsc->hw_ops.mode_ctrl(rsc, MODE2_UPDATE, NULL, 0, mode2_state);
-	}
-
-end:
+	rsc->hw_ops.mode_ctrl(rsc, MODE_UPDATE, NULL, 0, mode_state);
 	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+
 clk_enable_fail:
 	mutex_unlock(&rsc->client_lock);
-
-	pr_info("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
-								mode2_state);
+end:
 	kfree(input);
 	return count;
 }
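
The rewritten debugfs handler above takes a single integer instead of the old "mode0="/"mode1="/"mode2=" strings, and only the combinations 0x0, 0x1, 0x3 and 0x7 are accepted (lower modes must be enabled before higher ones). A minimal sketch of that validation, with a hypothetical helper name and the mode macros added in sde_rsc_priv.h below:

/* bit0 = mode-0, bit1 = mode-1, bit2 = mode-2 */
static bool rsc_mode_state_valid(u32 mode_state)
{
	mode_state &= 0x7;

	return mode_state == ALL_MODES_DISABLED ||
	       mode_state == ONLY_MODE_0_ENABLED ||
	       mode_state == ONLY_MODE_0_1_ENABLED ||
	       mode_state == ALL_MODES_ENABLED;
}
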
@@ -854,14 +872,15 @@
 			const char __user *p, size_t count, loff_t *ppos)
 {
 	struct sde_rsc_priv *rsc = file->private_data;
-	char *input, *vsync_mode;
+	char *input;
 	u32 vsync_state = 0;
 	int rc;
 
-	if (!rsc || !rsc->hw_ops.hw_vsync)
+	if (!rsc || !rsc->hw_ops.hw_vsync || !count ||
+				count > MAX_COUNT_SIZE_SUPPORTED)
 		return 0;
 
-	input = kmalloc(count, GFP_KERNEL);
+	input = kmalloc(count + 1, GFP_KERNEL);
 	if (!input)
 		return -ENOMEM;
 
@@ -869,18 +888,21 @@
 		kfree(input);
 		return -EFAULT;
 	}
-	input[count - 1] = '\0';
+	input[count] = '\0';
 
-	vsync_mode = strnstr(input, "vsync_mode=", strlen("vsync_mode="));
-	if (vsync_mode) {
-		vsync_state = vsync_mode[0] - '0';
-		vsync_state &= 0x7;
+	rc = kstrtoint(input, 0, &vsync_state);
+	if (rc) {
+		pr_err("vsync_state: int conversion failed rc:%d\n", rc);
+		goto end;
 	}
 
+	pr_debug("vsync_state: %d\n", vsync_state);
+	vsync_state &= 0x7;
+
 	mutex_lock(&rsc->client_lock);
 	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 	if (rc)
-		goto end;
+		goto clk_en_fail;
 
 	if (vsync_state)
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -890,8 +912,9 @@
 
 	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 
-end:
+clk_en_fail:
 	mutex_unlock(&rsc->client_lock);
+end:
 	kfree(input);
 	return count;
 }
@@ -1063,9 +1086,6 @@
 		pr_err("sde rsc:get display rsc failed ret:%d\n", ret);
 		goto sde_rsc_fail;
 	}
-	rpmh_invalidate(rsc->disp_rsc);
-	/* call flush to disable the disp rsc interrupt */
-	rpmh_flush(rsc->disp_rsc);
 
 	ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
 	if (ret) {
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index b63fbc6..3332a05 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -95,6 +95,7 @@
 #define SDE_RSCC_F1_QTMR_V1_CNTP_CTL			0x302C
 
 #define MAX_CHECK_LOOPS			500
+#define POWER_CTRL_BIT_12		12
 
 static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
 {
@@ -191,27 +192,27 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
 						0xa138ebaa, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xe0a581e1, rsc->debug_mode);
+						0xaca581e1, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x82e2a2ed, rsc->debug_mode);
+						0xe2a2ede0, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0x88ea8a39, rsc->debug_mode);
+						0xea8a3982, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0xa6e9a920, rsc->debug_mode);
+						0xa920888c, rsc->debug_mode);
 
 	/* tcs sleep sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0xa92089e6, rsc->debug_mode);
+						0x89e6a6e9, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0x89e7a7e9, rsc->debug_mode);
+						0xa7e9a920, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x00000020, rsc->debug_mode);
+						0x002079e7, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x29, rsc->debug_mode);
+						0x2b, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x2f, rsc->debug_mode);
+						0x31, rsc->debug_mode);
 
 	return 0;
 }
@@ -266,7 +267,7 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
 					mode_0_start_addr, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0,
-					0x80000010, rsc->debug_mode);
+					0x80000000, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
 			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
@@ -275,7 +276,7 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
 					mode_1_start_addr, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
-					0x80000010, rsc->debug_mode);
+					0x80000000, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
 			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
@@ -284,9 +285,9 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
 					mode_2_start_addr, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2,
-					0x80000010, rsc->debug_mode);
+					0x80000000, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2,
-			rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+					0x0, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
 			rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
 
@@ -297,6 +298,7 @@
 {
 	int rc;
 	int count, wrapper_status;
+	unsigned long reg;
 
 	if (rsc->power_collapse_block)
 		return -EINVAL;
@@ -355,6 +357,18 @@
 	if (rc) {
 		pr_err("vdd fs is still enabled\n");
 		goto end;
+	} else {
+		rc = -EINVAL;
+		/* this wait is required to turn off the rscc clocks */
+		for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+			reg = dss_reg_r(&rsc->wrapper_io,
+				SDE_RSCC_PWR_CTRL, rsc->debug_mode);
+			if (test_bit(POWER_CTRL_BIT_12, &reg)) {
+				rc = 0;
+				break;
+			}
+			usleep_range(1, 2);
+		}
 	}
 
 	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
@@ -458,9 +472,6 @@
 						0x1, rsc->debug_mode);
 		dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
 							0x0, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io,
-			SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x7,
-			rsc->debug_mode);
 		reg = dss_reg_r(&rsc->wrapper_io,
 			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
 		reg |= (BIT(0) | BIT(8));
@@ -484,9 +495,6 @@
 		reg &= ~(BIT(1) | BIT(0));
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
 							reg, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io,
-			SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x5,
-			rsc->debug_mode);
 		/* make sure that solver mode is override */
 		wmb();
 
@@ -501,9 +509,6 @@
 		reg &= ~(BIT(8) | BIT(0));
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
 							reg, rsc->debug_mode);
-		dss_reg_w(&rsc->drv_io,
-			SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x5,
-			rsc->debug_mode);
 		/* make sure that solver mode is disabled */
 		wmb();
 		break;
@@ -567,7 +572,7 @@
 }
 
 int rsc_hw_mode_ctrl(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, bool mode)
+		char *buffer, int buffer_size, u32 mode)
 {
 	u32 blen = 0;
 	u32 slot_time;
@@ -583,28 +588,19 @@
 			rsc->debug_mode));
 		break;
 
-	case MODE0_UPDATE:
-		slot_time = mode ? rsc->timer_config.rsc_time_slot_0_ns :
+	case MODE_UPDATE:
+		slot_time = mode & BIT(0) ? 0x0 :
+					rsc->timer_config.rsc_time_slot_2_ns;
+		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
+						slot_time, rsc->debug_mode);
+
+		slot_time = mode & BIT(1) ?
+			rsc->timer_config.rsc_time_slot_0_ns :
 				rsc->timer_config.rsc_time_slot_2_ns;
 		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
 						slot_time, rsc->debug_mode);
-		slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
-				rsc->timer_config.rsc_time_slot_2_ns;
-		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-						slot_time, rsc->debug_mode);
-		rsc->power_collapse_block = mode;
-		break;
 
-	case MODE1_UPDATE:
-		slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
-				rsc->timer_config.rsc_time_slot_2_ns;
-		dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
-						slot_time, rsc->debug_mode);
-		rsc->power_collapse_block = mode;
-		break;
-
-	case MODE2_UPDATE:
-		rsc->power_collapse_block = mode;
+		rsc->power_collapse_block = !(mode & BIT(2));
 		break;
 
 	default:
@@ -668,7 +664,7 @@
 			return blen;
 
 		blen = snprintf(buffer, buffer_size - blen, "vsync0:0x%x\n",
-			 dss_reg_r(&rsc->drv_io,
+			 dss_reg_r(&rsc->wrapper_io,
 				SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
 				rsc->debug_mode));
 		if (blen >= buffer_size)
@@ -676,15 +672,15 @@
 
 		blen += snprintf(buffer + blen, buffer_size - blen,
 			"vsync1:0x%x\n",
-			 dss_reg_r(&rsc->drv_io,
+			 dss_reg_r(&rsc->wrapper_io,
 				SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
 				rsc->debug_mode));
 		break;
 
 	case VSYNC_ENABLE:
-		reg = BIT(8) | BIT(9) | ((mode & 0x7) < 10);
+		reg = BIT(8) | ((mode & 0x7) < 10);
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
-					mode, rsc->debug_mode);
+					reg, rsc->debug_mode);
 		break;
 
 	case VSYNC_DISABLE:
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index b83a866..b90b0ac 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -25,20 +25,27 @@
 
 #define MAX_RSC_COUNT		5
 
+#define ALL_MODES_DISABLED	0x0
+#define ONLY_MODE_0_ENABLED	0x1
+#define ONLY_MODE_0_1_ENABLED	0x3
+#define ALL_MODES_ENABLED	0x7
+
+#define MAX_COUNT_SIZE_SUPPORTED	128
+
 struct sde_rsc_priv;
 
 /**
  * rsc_mode_req: sde rsc mode request information
  * MODE_READ: read vsync status
- * MODE0_UPDATE: mode0 status , this should be 0x0
- * MODE1_UPDATE: mode1 status , this should be 0x1
- * MODE2_UPDATE: mode2 status , this should be 0x2
+ * MODE_UPDATE: mode timeslot update
+ *            0x0: all modes are disabled.
+ *            0x1: Mode-0 is enabled and other two modes are disabled.
+ *            0x3: Mode-0 & Mode-1 are enabled and mode-2 is disabled.
+ *            0x7: all modes are enabled.
  */
 enum rsc_mode_req {
 	MODE_READ,
-	MODE0_UPDATE = 0x1,
-	MODE1_UPDATE = 0x2,
-	MODE2_UPDATE = 0x3,
+	MODE_UPDATE = 0x1,
 };
 
 /**
@@ -78,7 +85,7 @@
 	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
 	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
 	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, bool mode);
+		char *buffer, int buffer_size, u32 mode);
 };
 
 /**
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 69b639a..14a19a4 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -111,14 +111,6 @@
 #define A6XX_VSC_ADDR_MODE_CNTL          0xC01
 
 /* RBBM registers */
-#define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL           0x10
-#define A6XX_RBBM_INTERFACE_HANG_INT_CNTL        0x1f
-#define A6XX_RBBM_INT_CLEAR_CMD                  0x37
-#define A6XX_RBBM_INT_0_MASK                     0x38
-#define A6XX_RBBM_SW_RESET_CMD                   0x43
-#define A6XX_RBBM_BLOCK_SW_RESET_CMD             0x45
-#define A6XX_RBBM_BLOCK_SW_RESET_CMD2            0x46
-#define A6XX_RBBM_CLOCK_CNTL                     0xAE
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
@@ -390,6 +382,8 @@
 #define A6XX_RBBM_PERFCTR_RBBM_SEL_2             0x509
 #define A6XX_RBBM_PERFCTR_RBBM_SEL_3             0x50A
 
+#define A6XX_RBBM_ISDB_CNT                       0x533
+
 #define A6XX_RBBM_SECVID_TRUST_CNTL              0xF400
 #define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO     0xF800
 #define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI     0xF801
@@ -397,6 +391,122 @@
 #define A6XX_RBBM_SECVID_TSB_CNTL                0xF803
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
+#define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL   0x00010
+#define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f
+#define A6XX_RBBM_INT_CLEAR_CMD          0x00037
+#define A6XX_RBBM_INT_0_MASK             0x00038
+#define A6XX_RBBM_SP_HYST_CNT            0x00042
+#define A6XX_RBBM_SW_RESET_CMD           0x00043
+#define A6XX_RBBM_RAC_THRESHOLD_CNT      0x00044
+#define A6XX_RBBM_BLOCK_SW_RESET_CMD     0x00045
+#define A6XX_RBBM_BLOCK_SW_RESET_CMD2    0x00046
+#define A6XX_RBBM_CLOCK_CNTL             0x000ae
+#define A6XX_RBBM_CLOCK_CNTL_SP0         0x000b0
+#define A6XX_RBBM_CLOCK_CNTL_SP1         0x000b1
+#define A6XX_RBBM_CLOCK_CNTL_SP2         0x000b2
+#define A6XX_RBBM_CLOCK_CNTL_SP3         0x000b3
+#define A6XX_RBBM_CLOCK_CNTL2_SP0        0x000b4
+#define A6XX_RBBM_CLOCK_CNTL2_SP1        0x000b5
+#define A6XX_RBBM_CLOCK_CNTL2_SP2        0x000b6
+#define A6XX_RBBM_CLOCK_CNTL2_SP3        0x000b7
+#define A6XX_RBBM_CLOCK_DELAY_SP0        0x000b8
+#define A6XX_RBBM_CLOCK_DELAY_SP1        0x000b9
+#define A6XX_RBBM_CLOCK_DELAY_SP2        0x000ba
+#define A6XX_RBBM_CLOCK_DELAY_SP3        0x000bb
+#define A6XX_RBBM_CLOCK_HYST_SP0         0x000bc
+#define A6XX_RBBM_CLOCK_HYST_SP1         0x000bd
+#define A6XX_RBBM_CLOCK_HYST_SP2         0x000be
+#define A6XX_RBBM_CLOCK_HYST_SP3         0x000bf
+#define A6XX_RBBM_CLOCK_CNTL_TP0         0x000c0
+#define A6XX_RBBM_CLOCK_CNTL_TP1         0x000c1
+#define A6XX_RBBM_CLOCK_CNTL_TP2         0x000c2
+#define A6XX_RBBM_CLOCK_CNTL_TP3         0x000c3
+#define A6XX_RBBM_CLOCK_CNTL2_TP0        0x000c4
+#define A6XX_RBBM_CLOCK_CNTL2_TP1        0x000c5
+#define A6XX_RBBM_CLOCK_CNTL2_TP2        0x000c6
+#define A6XX_RBBM_CLOCK_CNTL2_TP3        0x000c7
+#define A6XX_RBBM_CLOCK_CNTL3_TP0        0x000c8
+#define A6XX_RBBM_CLOCK_CNTL3_TP1        0x000c9
+#define A6XX_RBBM_CLOCK_CNTL3_TP2        0x000ca
+#define A6XX_RBBM_CLOCK_CNTL3_TP3        0x000cb
+#define A6XX_RBBM_CLOCK_CNTL4_TP0        0x000cc
+#define A6XX_RBBM_CLOCK_CNTL4_TP1        0x000cd
+#define A6XX_RBBM_CLOCK_CNTL4_TP2        0x000ce
+#define A6XX_RBBM_CLOCK_CNTL4_TP3        0x000cf
+#define A6XX_RBBM_CLOCK_DELAY_TP0        0x000d0
+#define A6XX_RBBM_CLOCK_DELAY_TP1        0x000d1
+#define A6XX_RBBM_CLOCK_DELAY_TP2        0x000d2
+#define A6XX_RBBM_CLOCK_DELAY_TP3        0x000d3
+#define A6XX_RBBM_CLOCK_DELAY2_TP0       0x000d4
+#define A6XX_RBBM_CLOCK_DELAY2_TP1       0x000d5
+#define A6XX_RBBM_CLOCK_DELAY2_TP2       0x000d6
+#define A6XX_RBBM_CLOCK_DELAY2_TP3       0x000d7
+#define A6XX_RBBM_CLOCK_DELAY3_TP0       0x000d8
+#define A6XX_RBBM_CLOCK_DELAY3_TP1       0x000d9
+#define A6XX_RBBM_CLOCK_DELAY3_TP2       0x000da
+#define A6XX_RBBM_CLOCK_DELAY3_TP3       0x000db
+#define A6XX_RBBM_CLOCK_DELAY4_TP0       0x000dc
+#define A6XX_RBBM_CLOCK_DELAY4_TP1       0x000dd
+#define A6XX_RBBM_CLOCK_DELAY4_TP2       0x000de
+#define A6XX_RBBM_CLOCK_DELAY4_TP3       0x000df
+#define A6XX_RBBM_CLOCK_HYST_TP0         0x000e0
+#define A6XX_RBBM_CLOCK_HYST_TP1         0x000e1
+#define A6XX_RBBM_CLOCK_HYST_TP2         0x000e2
+#define A6XX_RBBM_CLOCK_HYST_TP3         0x000e3
+#define A6XX_RBBM_CLOCK_HYST2_TP0        0x000e4
+#define A6XX_RBBM_CLOCK_HYST2_TP1        0x000e5
+#define A6XX_RBBM_CLOCK_HYST2_TP2        0x000e6
+#define A6XX_RBBM_CLOCK_HYST2_TP3        0x000e7
+#define A6XX_RBBM_CLOCK_HYST3_TP0        0x000e8
+#define A6XX_RBBM_CLOCK_HYST3_TP1        0x000e9
+#define A6XX_RBBM_CLOCK_HYST3_TP2        0x000ea
+#define A6XX_RBBM_CLOCK_HYST3_TP3        0x000eb
+#define A6XX_RBBM_CLOCK_HYST4_TP0        0x000ec
+#define A6XX_RBBM_CLOCK_HYST4_TP1        0x000ed
+#define A6XX_RBBM_CLOCK_HYST4_TP2        0x000ee
+#define A6XX_RBBM_CLOCK_HYST4_TP3        0x000ef
+#define A6XX_RBBM_CLOCK_CNTL_RB0         0x000f0
+#define A6XX_RBBM_CLOCK_CNTL_RB1         0x000f1
+#define A6XX_RBBM_CLOCK_CNTL_RB2         0x000f2
+#define A6XX_RBBM_CLOCK_CNTL_RB3         0x000f3
+#define A6XX_RBBM_CLOCK_CNTL2_RB0        0x000f4
+#define A6XX_RBBM_CLOCK_CNTL2_RB1        0x000f5
+#define A6XX_RBBM_CLOCK_CNTL2_RB2        0x000f6
+#define A6XX_RBBM_CLOCK_CNTL2_RB3        0x000f7
+#define A6XX_RBBM_CLOCK_CNTL_CCU0        0x000f8
+#define A6XX_RBBM_CLOCK_CNTL_CCU1        0x000f9
+#define A6XX_RBBM_CLOCK_CNTL_CCU2        0x000fa
+#define A6XX_RBBM_CLOCK_CNTL_CCU3        0x000fb
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU0     0x00100
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU1     0x00101
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU2     0x00102
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU3     0x00103
+#define A6XX_RBBM_CLOCK_CNTL_RAC         0x00104
+#define A6XX_RBBM_CLOCK_CNTL2_RAC        0x00105
+#define A6XX_RBBM_CLOCK_DELAY_RAC        0x00106
+#define A6XX_RBBM_CLOCK_HYST_RAC         0x00107
+#define A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00108
+#define A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00109
+#define A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0010a
+#define A6XX_RBBM_CLOCK_CNTL_UCHE        0x0010b
+#define A6XX_RBBM_CLOCK_CNTL2_UCHE       0x0010c
+#define A6XX_RBBM_CLOCK_CNTL3_UCHE       0x0010d
+#define A6XX_RBBM_CLOCK_CNTL4_UCHE       0x0010e
+#define A6XX_RBBM_CLOCK_DELAY_UCHE       0x0010f
+#define A6XX_RBBM_CLOCK_HYST_UCHE        0x00110
+#define A6XX_RBBM_CLOCK_MODE_VFD         0x00111
+#define A6XX_RBBM_CLOCK_DELAY_VFD        0x00112
+#define A6XX_RBBM_CLOCK_HYST_VFD         0x00113
+#define A6XX_RBBM_CLOCK_MODE_GPC         0x00114
+#define A6XX_RBBM_CLOCK_DELAY_GPC        0x00115
+#define A6XX_RBBM_CLOCK_HYST_GPC         0x00116
+#define A6XX_RBBM_CLOCK_DELAY_HLSQ_2	 0x00117
+#define A6XX_RBBM_CLOCK_CNTL_GMU_GX      0x00118
+#define A6XX_RBBM_CLOCK_DELAY_GMU_GX     0x00119
+#define A6XX_RBBM_CLOCK_HYST_GMU_GX      0x0011a
+#define A6XX_RBBM_CLOCK_MODE_HLSQ	 0x0011b
+#define A6XX_RBBM_CLOCK_DELAY_HLSQ       0x0011c
+
 /* DBGC_CFG registers */
 #define A6XX_DBGC_CFG_DBGBUS_SEL_A                  0x600
 #define A6XX_DBGC_CFG_DBGBUS_SEL_B                  0x601
@@ -666,6 +776,7 @@
 #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT  0x8
 
 /* GMU control registers */
+#define A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL   0x1A880
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
 #define A6XX_GMU_CM3_ITCM_START			0x1B400
 #define A6XX_GMU_CM3_DTCM_START			0x1C400
@@ -722,6 +833,9 @@
 #define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
 #define A6XX_GMU_AO_HOST_INTERRUPT_MASK		0x23B06
+#define A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL       0x23B09
+#define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL      0x23B0A
+#define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL       0x23B0B
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 876ff0c..9a44f34 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -326,8 +326,7 @@
 		.major = 3,
 		.minor = 0,
 		.patchid = ANY_ID,
-		.features = ADRENO_64BIT |
-			ADRENO_GPMU | ADRENO_RPMH,
+		.features = ADRENO_64BIT | ADRENO_RPMH,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 68d7653..ebaa1a9 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1403,7 +1403,7 @@
 	}
 
 	/* GPU comes up in secured mode, make it unsecured by default */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+	if (adreno_dev->zap_loaded)
 		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
 	else
 		adreno_writereg(adreno_dev,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 530529f..7c76580 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -410,6 +410,7 @@
  * @gpu_llc_slice_enable: To enable the GPU system cache slice or not
  * @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
  * @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
+ * @zap_loaded: Used to track if zap was successfully loaded or not
  */
 struct adreno_device {
 	struct kgsl_device dev;    /* Must be first field in this struct */
@@ -473,6 +474,7 @@
 	bool gpu_llc_slice_enable;
 	void *gpuhtw_llc_slice;
 	bool gpuhtw_llc_slice_enable;
+	unsigned int zap_loaded;
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 1e95e38..6c8b677 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -30,7 +30,6 @@
 #include "kgsl_trace.h"
 #include "adreno_a5xx_packets.h"
 
-static int zap_ucode_loaded;
 static int critical_packet_constructed;
 
 static struct kgsl_memdesc crit_pkts;
@@ -2179,7 +2178,7 @@
 	 * appropriate register,
 	 * skip if retention is supported for the CPZ register
 	 */
-	if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
+	if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
 		ADRENO_CPZ_RETENTION))) {
 		int ret;
 		struct scm_desc desc = {0};
@@ -2197,14 +2196,13 @@
 	}
 
 	/* Load the zap shader firmware through PIL if it's available */
-	if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		ptr = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(ptr))
 			return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
-
-		zap_ucode_loaded = 1;
+		adreno_dev->zap_loaded = 1;
 	}
 
 	return 0;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b2d7467..dde10ee 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -59,6 +59,127 @@
 	{ adreno_is_a630, a630_vbif },
 };
 
+
+struct kgsl_hwcg_reg {
+	unsigned int off;
+	unsigned int val;
+};
+static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
+	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x0000F3CF},
+	{A6XX_RBBM_CLOCK_HYST_SP0, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP1, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP2, 0x00000080},
+	{A6XX_RBBM_CLOCK_HYST_SP3, 0x00000080},
+	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x07777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x07777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x07777777},
+	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x07777777},
+	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static const struct {
+	int (*devfunc)(struct adreno_device *adreno_dev);
+	const struct kgsl_hwcg_reg *regs;
+	unsigned int count;
+} a6xx_hwcg_registers[] = {
+	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}
+};
+
 static struct a6xx_protected_regs {
 	unsigned int base;
 	unsigned int count;
@@ -103,9 +224,27 @@
 	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
 }
 
+static void _update_always_on_regs(struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int *const regs = gpudev->reg_offsets->offsets;
+
+	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
+		A6XX_CP_ALWAYS_ON_COUNTER_LO;
+	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
+		A6XX_CP_ALWAYS_ON_COUNTER_HI;
+}
+
 static void a6xx_init(struct adreno_device *adreno_dev)
 {
 	a6xx_crashdump_init(adreno_dev);
+
+	/*
+	 * If the GMU is not enabled, rewrite the offsets for the always-on
+	 * counters to point to the CP always-on counter instead of the GMU one
+	 */
+	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
+		_update_always_on_regs(adreno_dev);
 }
 
 /**
@@ -125,7 +264,7 @@
 	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
 
 	/* enable access protection to privileged registers */
-	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);
+	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
 
 	if (mmu_prot) {
 		mmu_base = mmu_prot->base;
@@ -181,6 +320,48 @@
 	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
 }
 
+
+static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	const struct kgsl_hwcg_reg *regs;
+	int i, j;
+
+	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
+		if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(a6xx_hwcg_registers))
+		return;
+
+	regs = a6xx_hwcg_registers[i].regs;
+
+	/* Disable SP clock before programming HWCG registers */
+	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0);
+
+	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
+		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
+
+	if (kgsl_gmu_isenabled(device)) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+			0x00020222);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+			0x00010111);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+			0x00050555);
+	}
+	/* Enable SP clock */
+	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+	/* enable top level HWCG */
+	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
+	kgsl_regwrite(device, A6XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
+}
+
 /*
  * a6xx_start() - Device start
  * @adreno_dev: Pointer to adreno device
@@ -197,6 +378,8 @@
 	if (!kgsl_gmu_isenabled(device))
 		/* Legacy idle management if gmu is disabled */
 		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
+	/* enable hardware clockgating */
+	a6xx_hwcg_set(adreno_dev, true);
 
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
@@ -235,6 +418,9 @@
 	/* Set the AHB default slave response to "ERROR" */
 	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);
 
+	/* Turn on performance counters */
+	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
 	if (of_property_read_u32(device->pdev->dev.of_node,
 		"qcom,highest-bank-bit", &bit))
 		bit = MIN_HBB;
@@ -282,8 +468,9 @@
 	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
 						(mal << 23) | (bit << 21));
 
+	/* Set hang detection threshold to 4 million cycles (0x3FFFF*16) */
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
-					  (1 << 30) | 0x4000);
+					  (1 << 30) | 0x3ffff);
 
 	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
 
@@ -303,7 +490,7 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	uint64_t gpuaddr;
-	static void *zap;
+	void *zap;
 	int ret = 0;
 
 	gpuaddr = fw->memdesc.gpuaddr;
@@ -313,14 +500,15 @@
 				upper_32_bits(gpuaddr));
 
 	/* Load the zap shader firmware through PIL if it's available */
-	if (adreno_dev->gpucore->zap_name && !zap) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		zap = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(zap)) {
 			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
 			zap = NULL;
-		}
+		} else
+			adreno_dev->zap_loaded = 1;
 	}
 
 	return ret;
@@ -1556,7 +1744,10 @@
 	else if (client_id != 3)
 		return fault_block[client_id];
 
+	mutex_lock(&device->mutex);
 	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
+	mutex_unlock(&device->mutex);
+
 	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
 }
 
@@ -2036,10 +2227,6 @@
 				A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
 				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
-	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
-				A6XX_CP_ALWAYS_ON_COUNTER_LO),
-	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
-				A6XX_CP_ALWAYS_ON_COUNTER_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index ba83cd7..decbff3 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -121,6 +121,8 @@
 	unsigned int statetype;
 	const unsigned int *regs;
 	unsigned int num_sets;
+	unsigned int offset0;
+	unsigned int offset1;
 } a6xx_dbgahb_ctx_clusters[] = {
 	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
 		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
@@ -178,6 +180,7 @@
 	unsigned int statetype;
 	const unsigned int *regs;
 	unsigned int num_sets;
+	unsigned int offset;
 } a6xx_non_ctx_dbgahb[] = {
 	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
 		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
@@ -624,8 +627,8 @@
 	return val;
 }
 
-static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
-				size_t remain, void *priv)
+static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
+				u8 *buf, size_t remain, void *priv)
 {
 	struct kgsl_snapshot_mvc_regs *header =
 				(struct kgsl_snapshot_mvc_regs *)buf;
@@ -678,9 +681,64 @@
 	return data_size + sizeof(*header);
 }
 
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
 				size_t remain, void *priv)
 {
+	struct kgsl_snapshot_mvc_regs *header =
+				(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_dbgahb_regs_info *info =
+				(struct a6xx_cluster_dbgahb_regs_info *)priv;
+	struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
+	unsigned int data_size = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int i, j;
+	unsigned int *src;
+
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
+				info);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cluster->id;
+
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
+		(header->ctxt_id ? cluster->offset1 : cluster->offset0));
+
+	for (i = 0; i < cluster->num_sets; i++) {
+		unsigned int start;
+		unsigned int end;
+
+		start = cluster->regs[2 * i];
+		end = cluster->regs[2 * i + 1];
+
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		*data++ = start | (1 << 31);
+		*data++ = end;
+		for (j = start; j <= end; j++)
+			*data++ = *src++;
+	}
+out:
+	return data_size + sizeof(*header);
+}
+
+static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
+				u8 *buf, size_t remain, void *priv)
+{
 	struct kgsl_snapshot_regs *header =
 				(struct kgsl_snapshot_regs *)buf;
 	struct a6xx_non_ctx_dbgahb_registers *regs =
@@ -724,6 +782,57 @@
 	return (count * 8) + sizeof(*header);
 }
 
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header =
+				(struct kgsl_snapshot_regs *)buf;
+	struct a6xx_non_ctx_dbgahb_registers *regs =
+				(struct a6xx_non_ctx_dbgahb_registers *)priv;
+	unsigned int count = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int i, k;
+	unsigned int *src;
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
+				regs);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
+
+	for (i = 0; i < regs->num_sets; i++) {
+		unsigned int start;
+		unsigned int end;
+
+		start = regs->regs[2 * i];
+		end = regs->regs[(2 * i) + 1];
+
+		if (remain < (end - start + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
+
+		remain -= ((end - start) + 1) * 8;
+
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
+		}
+	}
+out:
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
 static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
 				struct kgsl_snapshot *snapshot)
 {
@@ -1391,6 +1500,81 @@
 	return qwords;
 }
 
+static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, j, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
+		struct a6xx_cluster_dbgahb_registers *cluster =
+				&a6xx_dbgahb_ctx_clusters[i];
+
+		cluster->offset0 = *offset;
+
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+			if (j == 1)
+				cluster->offset1 = *offset;
+
+			/* Program the aperture */
+			ptr[qwords++] =
+				((cluster->statetype + j * 2) & 0xff) << 8;
+			ptr[qwords++] =
+				(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+					(1 << 21) | 1;
+
+			for (k = 0; k < cluster->num_sets; k++) {
+				unsigned int start = cluster->regs[2 * k];
+
+				count = REG_PAIR_COUNT(cluster->regs, k);
+				ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+				ptr[qwords++] =
+				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+					start - cluster->regbase / 4) << 44)) |
+							count;
+
+				*offset += count * sizeof(unsigned int);
+			}
+		}
+	}
+	return qwords;
+}
+
+static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		regs->offset = *offset;
+
+		/* Program the aperture */
+		ptr[qwords++] = (regs->statetype & 0xff) << 8;
+		ptr[qwords++] =	(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+					(1 << 21) | 1;
+
+		for (k = 0; k < regs->num_sets; k++) {
+			unsigned int start = regs->regs[2 * k];
+
+			count = REG_PAIR_COUNT(regs->regs, k);
+			ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+			ptr[qwords++] =
+				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+					start - regs->regbase / 4) << 44)) |
+							count;
+
+			*offset += count * sizeof(unsigned int);
+		}
+	}
+	return qwords;
+}
+
 void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1458,6 +1642,46 @@
 		}
 	}
 
+	/* Calculate the script and data size for debug AHB registers */
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
+		struct a6xx_cluster_dbgahb_registers *cluster =
+				&a6xx_dbgahb_ctx_clusters[i];
+
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+			/* 16 bytes for programming the aperture */
+			script_size += 16;
+
+			/* Reading each pair of registers takes 16 bytes */
+			script_size += 16 * cluster->num_sets;
+
+			/* A dword per register read from the cluster list */
+			for (k = 0; k < cluster->num_sets; k++)
+				data_size += REG_PAIR_COUNT(cluster->regs, k) *
+						sizeof(unsigned int);
+		}
+	}
+
+	/*
+	 * Calculate the script and data size for non context debug
+	 * AHB registers
+	 */
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		/* 16 bytes for programming the aperture */
+		script_size += 16;
+
+		/* Reading each pair of registers takes 16 bytes */
+		script_size += 16 * regs->num_sets;
+
+		/* A dword per register read from the cluster list */
+		for (k = 0; k < regs->num_sets; k++)
+			data_size += REG_PAIR_COUNT(regs->regs, k) *
+				sizeof(unsigned int);
+	}
+
 	/* Now allocate the script and data buffers */
 
 	/* The script buffers needs 2 extra qwords on the end */
@@ -1497,6 +1721,10 @@
 	/* Program the capture script for the MVC registers */
 	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
 
+	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
+
+	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
+
 	*ptr++ = 0;
 	*ptr++ = 0;
 }
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index ed5b714..1cb0259 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1045,6 +1045,13 @@
 	 */
 	if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
 		set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+	/*
+	 *  Set the fault tolerance policy to FT_REPLAY - the context wants to
+	 *  be invalidated after a replay attempt fails, so there is no need
+	 *  to run the default FT policy.
+	 */
+	else if (drawctxt->base.flags & KGSL_CONTEXT_INVALIDATE_ON_FAULT)
+		set_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
 	else
 		cmdobj->fault_policy = adreno_dev->ft_policy;
 }
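
KGSL_CONTEXT_INVALIDATE_ON_FAULT is supplied by user space at context creation (the flag is whitelisted in adreno_drawctxt.c below). A hedged user-space sketch, assuming the flag is exported through the msm_kgsl.h UAPI header alongside the existing context flags:

#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>	/* assumed to define the flag and ioctl */

/* Create a context that is invalidated after a single failed replay. */
static int create_invalidate_on_fault_ctx(int fd, unsigned int *ctxt_id)
{
	struct kgsl_drawctxt_create req = {
		.flags = KGSL_CONTEXT_PREAMBLE |
			 KGSL_CONTEXT_INVALIDATE_ON_FAULT,
	};
	int ret = ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req);

	if (!ret)
		*ctxt_id = req.drawctxt_id;
	return ret;
}
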
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index f77d438..f217822 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -342,13 +342,14 @@
 	struct kgsl_device *device = dev_priv->device;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int ret;
-	unsigned long local;
+	unsigned int local;
 
 	local = *flags & (KGSL_CONTEXT_PREAMBLE |
 		KGSL_CONTEXT_NO_GMEM_ALLOC |
 		KGSL_CONTEXT_PER_CONTEXT_TS |
 		KGSL_CONTEXT_USER_GENERATED_TS |
 		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+		KGSL_CONTEXT_INVALIDATE_ON_FAULT |
 		KGSL_CONTEXT_CTX_SWITCH |
 		KGSL_CONTEXT_PRIORITY_MASK |
 		KGSL_CONTEXT_TYPE_MASK |
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 32175f5..fbff535 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -54,10 +54,21 @@
 
 	/* Read always on registers */
 	if (!adreno_is_a3xx(adreno_dev)) {
-		adreno_readreg64(adreno_dev,
-			ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
-			ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
-			&time->ticks);
+		if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev))) {
+			uint32_t val_lo, val_hi;
+
+			adreno_read_gmureg(adreno_dev,
+				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &val_lo);
+			adreno_read_gmureg(adreno_dev,
+				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &val_hi);
+
+			time->ticks = (val_lo | ((uint64_t)val_hi << 32));
+		} else {
+			adreno_readreg64(adreno_dev,
+				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+				&time->ticks);
+		}
 
 		/* Mask hi bits as they may be incorrect on some targets */
 		if (ADRENO_GPUREV(adreno_dev) >= 400 &&
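
With the GMU enabled, the always-on timestamp above is read as two 32-bit GMU register accesses and stitched together. A minimal sketch of that composition (hypothetical helper name; like the hunk above it does not guard against a low-word rollover between the two reads):

static uint64_t adreno_read_alwayson_ticks(struct adreno_device *adreno_dev)
{
	uint32_t lo, hi;

	adreno_read_gmureg(adreno_dev,
		ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &lo);
	adreno_read_gmureg(adreno_dev,
		ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &hi);

	return ((uint64_t)hi << 32) | lo;
}
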
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 15f68bf..d836cbb 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -43,6 +43,7 @@
 #include "kgsl_sync.h"
 #include "kgsl_compat.h"
 #include "kgsl_pool.h"
+#include "adreno.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
@@ -1045,7 +1046,10 @@
 	int result = 0;
 
 	mutex_lock(&device->mutex);
-	device->open_count--;
+
+	if (!adreno_is_a6xx(ADRENO_DEVICE(device)))
+		device->open_count--;
+
 	if (device->open_count == 0) {
 
 		/* Wait for the active count to go to 0 */
@@ -1627,7 +1631,8 @@
 
 		/* If no profiling buffer was specified, clear the flag */
 		if (cmdobj->profiling_buf_entry == NULL)
-			DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+			DRAWOBJ(cmdobj)->flags &=
+				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
 	}
 
 	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
@@ -1716,7 +1721,8 @@
 
 		/* If no profiling buffer was specified, clear the flag */
 		if (cmdobj->profiling_buf_entry == NULL)
-			DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+			DRAWOBJ(cmdobj)->flags &=
+				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
 	}
 
 	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
@@ -2041,7 +2047,7 @@
 	unsigned long flags_requested = (VM_READ | VM_WRITE);
 
 	if (flags & KGSL_MEMFLAGS_GPUREADONLY)
-		flags_requested &= ~VM_WRITE;
+		flags_requested &= ~(unsigned long)VM_WRITE;
 
 	if ((vma->vm_flags & flags_requested) == flags_requested)
 		return 0;
@@ -2135,7 +2141,7 @@
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = (uint64_t) size;
 	entry->memdesc.useraddr = hostptr;
-	entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR;
+	entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
 
 	if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
 		int ret;
@@ -2166,7 +2172,7 @@
 static void _setup_cache_mode(struct kgsl_mem_entry *entry,
 		struct vm_area_struct *vma)
 {
-	unsigned int mode;
+	uint64_t mode;
 	pgprot_t pgprot = vma->vm_page_prot;
 
 	if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
@@ -2525,7 +2531,7 @@
 	entry->memdesc.size = 0;
 	/* USE_CPU_MAP is not implemented for ION. */
 	entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-	entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ION;
+	entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION;
 
 	sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 
@@ -3028,8 +3034,9 @@
 	if ((flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT ==
 					KGSL_CACHEMODE_WRITETHROUGH) {
 		flags &= ~((uint64_t) KGSL_CACHEMODE_MASK);
-		flags |= (KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT) &
-							KGSL_CACHEMODE_MASK;
+		flags |= (uint64_t)((KGSL_CACHEMODE_WRITEBACK <<
+						KGSL_CACHEMODE_SHIFT) &
+					KGSL_CACHEMODE_MASK);
 	}
 	return flags;
 }
@@ -3083,8 +3090,9 @@
 			KGSL_MAX_ALIGN >> 10);
 
 		flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
-		flags |= (ilog2(KGSL_MAX_ALIGN) << KGSL_MEMALIGN_SHIFT) &
-			KGSL_MEMALIGN_MASK;
+		flags |= (uint64_t)((ilog2(KGSL_MAX_ALIGN) <<
+						KGSL_MEMALIGN_SHIFT) &
+					KGSL_MEMALIGN_MASK);
 	}
 
 	/* For now only allow allocations up to 4G */
@@ -3975,7 +3983,8 @@
 
 	if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
 		entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
-		entry->memdesc.flags |= param->type << KGSL_MEMTYPE_SHIFT;
+		entry->memdesc.flags |= (uint64_t)(param->type <<
+						KGSL_MEMTYPE_SHIFT);
 	}
 
 	kgsl_mem_entry_put(entry);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d955aa0..db105c5 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -80,6 +80,7 @@
 	{ KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
 	{ KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
 	{ KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
+	{ KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
 	{ KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
 	{ KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 86d4d61..938c96d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1608,6 +1608,8 @@
 			ret = PTR_ERR(mmu->defaultpagetable);
 			mmu->defaultpagetable = NULL;
 			return ret;
+		} else if (mmu->defaultpagetable == NULL) {
+			return -ENOMEM;
 		}
 	}
 
@@ -2598,7 +2600,7 @@
 
 static const struct {
 	char *feature;
-	int bit;
+	unsigned long bit;
 } kgsl_iommu_features[] = {
 	{ "qcom,retention", KGSL_MMU_RETENTION },
 	{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index b3e2b6a..a9a3c94 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -67,7 +67,9 @@
 	"isense_clk",
 	"rbcpr_clk",
 	"iref_clk",
-	"gmu_clk"
+	"gmu_clk",
+	"ahb_clk",
+	"cxo_clk"
 };
 
 static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 62ee597..6b22fd4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -25,7 +25,7 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
-#define KGSL_MAX_CLKS 15
+#define KGSL_MAX_CLKS 17
 #define KGSL_MAX_REGULATORS 2
 
 #define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 07a54d9..7636a42 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -927,8 +927,7 @@
 				&data->bin.ctxt_aware_target_pwrlevel))
 			data->bin.ctxt_aware_target_pwrlevel = 1;
 
-		if ((data->bin.ctxt_aware_target_pwrlevel < 0) ||
-			(data->bin.ctxt_aware_target_pwrlevel >
+		if ((data->bin.ctxt_aware_target_pwrlevel >
 						pwr->num_pwrlevels))
 			data->bin.ctxt_aware_target_pwrlevel = 1;
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 10b37ae..dd41e4e 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -131,8 +131,9 @@
 	if (align > 32)
 		align = 32;
 
-	memdesc->flags &= ~KGSL_MEMALIGN_MASK;
-	memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
+	memdesc->flags &= ~(uint64_t)KGSL_MEMALIGN_MASK;
+	memdesc->flags |= (uint64_t)((align << KGSL_MEMALIGN_SHIFT) &
+					KGSL_MEMALIGN_MASK);
 	return 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 973a2ff..96873c4 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -803,11 +803,23 @@
 	return "kgsl-syncsource-timeline";
 }
 
+static void kgsl_syncsource_fence_value_str(struct fence *fence,
+						char *str, int size)
+{
+	/*
+	 * Each fence is independent of the others on the same timeline.
+	 * We use a different context for each of them.
+	 */
+	snprintf(str, size, "%llu", fence->context);
+}
+
 static const struct fence_ops kgsl_syncsource_fence_ops = {
 	.get_driver_name = kgsl_syncsource_driver_name,
 	.get_timeline_name = kgsl_syncsource_get_timeline_name,
 	.enable_signaling = kgsl_syncsource_enable_signaling,
 	.wait = fence_default_wait,
 	.release = kgsl_syncsource_fence_release,
+
+	.fence_value_str = kgsl_syncsource_fence_value_str,
 };
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 4314616..b99c1df 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3115,7 +3115,7 @@
 {
 	int sioaddr[2] = { REG_2E, REG_4E };
 	struct it87_sio_data sio_data;
-	unsigned short isa_address;
+	unsigned short isa_address[2];
 	bool found = false;
 	int i, err;
 
@@ -3125,15 +3125,29 @@
 
 	for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
 		memset(&sio_data, 0, sizeof(struct it87_sio_data));
-		isa_address = 0;
-		err = it87_find(sioaddr[i], &isa_address, &sio_data);
-		if (err || isa_address == 0)
+		isa_address[i] = 0;
+		err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+		if (err || isa_address[i] == 0)
 			continue;
+		/*
+		 * Don't register second chip if its ISA address matches
+		 * the first chip's ISA address.
+		 */
+		if (i && isa_address[i] == isa_address[0])
+			break;
 
-		err = it87_device_add(i, isa_address, &sio_data);
+		err = it87_device_add(i, isa_address[i], &sio_data);
 		if (err)
 			goto exit_dev_unregister;
+
 		found = true;
+
+		/*
+		 * IT8705F may respond on both SIO addresses.
+		 * Stop probing after finding one.
+		 */
+		if (sio_data.type == it87)
+			break;
 	}
 
 	if (!found) {
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
index 5857d30..22e9d6f 100644
--- a/drivers/hwtracing/coresight/coresight-hwevent.c
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -216,14 +216,10 @@
 
 	mutex_init(&drvdata->mutex);
 
-	drvdata->clk = devm_clk_get(dev, "core_clk");
+	drvdata->clk = devm_clk_get(dev, "apb_pclk");
 	if (IS_ERR(drvdata->clk))
 		return PTR_ERR(drvdata->clk);
 
-	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
-	if (ret)
-		return ret;
-
 	drvdata->nr_hclk = of_property_count_strings(pdev->dev.of_node,
 						     "qcom,hwevent-clks");
 	drvdata->nr_hreg = of_property_count_strings(pdev->dev.of_node,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 833f10d..475ea75 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -495,7 +495,7 @@
 	}
 }
 
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
 	CS_UNLOCK(drvdata->base);
 
@@ -557,6 +557,207 @@
 	}
 }
 
+static void tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+	get_qdss_bam_connection_info(&bamdata->dest,
+				    &bamdata->dest_pipe_idx,
+				    &bamdata->src_pipe_idx,
+				    &bamdata->desc_fifo,
+				    &bamdata->data_fifo,
+				    NULL);
+}
+
+static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+	uint32_t axictl;
+
+	if (drvdata->enable_to_bam)
+		return;
+
+	/* Configure and enable required CSR registers */
+	msm_qdss_csr_enable_bam_to_usb();
+
+	/* Configure and enable ETR for usb bam output */
+
+	CS_UNLOCK(drvdata->base);
+
+	writel_relaxed(bamdata->data_fifo.size / 4, drvdata->base + TMC_RSZ);
+	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
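+	/*
+	 * Program AXICTL: set the write burst length field (bits [11:8])
+	 * to 0xF (16-beat bursts), clear bit 7 (scatter-gather mode) since
+	 * the BAM data FIFO is one contiguous buffer, and set the AXI
+	 * protection control field (bits [1:0]) to 0x2.
+	 */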
+	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+	axictl |= (0xF << 8);
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+	axictl &= ~(0x1 << 7);
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+	axictl = (axictl & ~0x3) | 0x2;
+	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+	writel_relaxed((uint32_t)bamdata->data_fifo.phys_base,
+		       drvdata->base + TMC_DBALO);
+	writel_relaxed((((uint64_t)bamdata->data_fifo.phys_base) >> 32) & 0xFF,
+		       drvdata->base + TMC_DBAHI);
+	/* Set FOnFlIn for periodic flush */
+	writel_relaxed(0x133, drvdata->base + TMC_FFCR);
+	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+	tmc_enable_hw(drvdata);
+
+	CS_LOCK(drvdata->base);
+
+	drvdata->enable_to_bam = true;
+}
+
+static int tmc_etr_bam_enable(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+	int ret;
+
+	if (bamdata->enable)
+		return 0;
+
+	/* Reset bam to start with */
+	ret = sps_device_reset(bamdata->handle);
+	if (ret)
+		goto err0;
+
+	/* Now configure and enable bam */
+
+	bamdata->pipe = sps_alloc_endpoint();
+	if (!bamdata->pipe)
+		return -ENOMEM;
+
+	ret = sps_get_config(bamdata->pipe, &bamdata->connect);
+	if (ret)
+		goto err1;
+
+	bamdata->connect.mode = SPS_MODE_SRC;
+	bamdata->connect.source = bamdata->handle;
+	bamdata->connect.event_thresh = 0x4;
+	bamdata->connect.src_pipe_index = TMC_ETR_BAM_PIPE_INDEX;
+	bamdata->connect.options = SPS_O_AUTO_ENABLE;
+
+	bamdata->connect.destination = bamdata->dest;
+	bamdata->connect.dest_pipe_index = bamdata->dest_pipe_idx;
+	bamdata->connect.desc = bamdata->desc_fifo;
+	bamdata->connect.data = bamdata->data_fifo;
+
+	ret = sps_connect(bamdata->pipe, &bamdata->connect);
+	if (ret)
+		goto err1;
+
+	bamdata->enable = true;
+	return 0;
+err1:
+	sps_free_endpoint(bamdata->pipe);
+err0:
+	return ret;
+}
+
+static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
+{
+	int count;
+
+	/* Ensure no flush is in progress */
+	for (count = TIMEOUT_US;
+	     BVAL(readl_relaxed(drvdata->base + TMC_FFSR), 0) != 0
+	     && count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while waiting for TMC flush, TMC_FFSR: %#x\n",
+	     readl_relaxed(drvdata->base + TMC_FFSR));
+}
+
+void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata)
+{
+	if (!drvdata->enable_to_bam)
+		return;
+
+	/* Ensure periodic flush is disabled in CSR block */
+	msm_qdss_csr_disable_flush();
+
+	CS_UNLOCK(drvdata->base);
+
+	tmc_wait_for_flush(drvdata);
+	tmc_disable_hw(drvdata);
+
+	CS_LOCK(drvdata->base);
+
+	/* Disable CSR configuration */
+	msm_qdss_csr_disable_bam_to_usb();
+	drvdata->enable_to_bam = false;
+}
+
+void tmc_etr_bam_disable(struct tmc_drvdata *drvdata)
+{
+	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
+
+	if (!bamdata->enable)
+		return;
+
+	sps_disconnect(bamdata->pipe);
+	sps_free_endpoint(bamdata->pipe);
+	bamdata->enable = false;
+}
+
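+/*
+ * usb_notifier - callback for USB QDSS channel events
+ *
+ * On USB_QDSS_CONNECT, fetch the BAM pipe/FIFO details from the USB
+ * driver, enable the QDSS BAM and route ETR output to it.  On
+ * USB_QDSS_DISCONNECT, tear the path down in the reverse order.
+ */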
+void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
+		  struct usb_qdss_ch *ch)
+{
+	struct tmc_drvdata *drvdata = priv;
+	unsigned long flags;
+	int ret = 0;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (event == USB_QDSS_CONNECT) {
+		tmc_etr_fill_usb_bam_data(drvdata);
+		ret = tmc_etr_bam_enable(drvdata);
+		if (ret)
+			dev_err(drvdata->dev, "ETR BAM enable failed\n");
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		__tmc_etr_enable_to_bam(drvdata);
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	} else if (event == USB_QDSS_DISCONNECT) {
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		__tmc_etr_disable_to_bam(drvdata);
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		tmc_etr_bam_disable(drvdata);
+	}
+	mutex_unlock(&drvdata->mem_lock);
+}
+
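+/*
+ * tmc_etr_bam_init - map the BAM register space (second "reg" entry in
+ * the device node) and register the ETR BAM with the SPS driver.
+ */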
+int tmc_etr_bam_init(struct amba_device *adev,
+		     struct tmc_drvdata *drvdata)
+{
+	int ret;
+	struct device *dev = &adev->dev;
+	struct resource res;
+	struct tmc_etr_bam_data *bamdata;
+
+	bamdata = devm_kzalloc(dev, sizeof(*bamdata), GFP_KERNEL);
+	if (!bamdata)
+		return -ENOMEM;
+	drvdata->bamdata = bamdata;
+
+	ret = of_address_to_resource(adev->dev.of_node, 1, &res);
+	if (ret)
+		return -ENODEV;
+
+	bamdata->props.phys_addr = res.start;
+	bamdata->props.virt_addr = devm_ioremap(dev, res.start,
+						resource_size(&res));
+	if (!bamdata->props.virt_addr)
+		return -ENOMEM;
+	bamdata->props.virt_size = resource_size(&res);
+
+	bamdata->props.event_threshold = 0x4; /* Pipe event threshold */
+	bamdata->props.summing_threshold = 0x10; /* BAM event threshold */
+	bamdata->props.irq = 0;
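+/*
+ * A CDM client handle packs the CDM hw index into bits [8:5] and the
+ * client index into bits [4:0]; the macros below build and decode it.
+ */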
+	bamdata->props.num_pipes = TMC_ETR_BAM_NR_PIPES;
+
+	return sps_register_bam_device(&bamdata->props, &bamdata->handle);
+}
+
 static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
 {
 	int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 10e8da4..01dc5e1 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -63,11 +63,13 @@
 
 void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
+	drvdata->enable = true;
 	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 }
 
 void tmc_disable_hw(struct tmc_drvdata *drvdata)
 {
+	drvdata->enable = false;
 	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 }
 
@@ -309,6 +311,100 @@
 }
 static DEVICE_ATTR_RW(mem_size);
 
+static ssize_t out_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			str_tmc_etr_out_mode[drvdata->out_mode]);
+}
+
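+/*
+ * out_mode_store - switch the ETR output path at runtime
+ *
+ * Writing "mem" routes trace data to the DDR buffer, writing "usb"
+ * routes it to the USB BAM.  If the ETR is enabled, the old path is
+ * torn down and the new one set up under the spinlock; the USB QDSS
+ * channel is opened/closed outside the atomic section.
+ */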
+static ssize_t out_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[10] = "";
+	unsigned long flags;
+	int ret;
+
+	if (strlen(buf) >= 10)
+		return -EINVAL;
+	if (sscanf(buf, "%10s", str) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mem_lock);
+	if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		__tmc_etr_disable_to_bam(drvdata);
+		tmc_etr_enable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		tmc_etr_bam_disable(drvdata);
+		usb_qdss_close(drvdata->usbch);
+	} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+			goto out;
+
+		spin_lock_irqsave(&drvdata->spinlock, flags);
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
+		}
+		if (drvdata->reading) {
+			ret = -EBUSY;
+			goto err1;
+		}
+		tmc_etr_disable_hw(drvdata);
+		drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+		drvdata->usbch = usb_qdss_open("qdss", drvdata,
+					       usb_notifier);
+		if (IS_ERR(drvdata->usbch)) {
+			dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = PTR_ERR(drvdata->usbch);
+			goto err0;
+		}
+	}
+out:
+	mutex_unlock(&drvdata->mem_lock);
+	return size;
+err1:
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+err0:
+	mutex_unlock(&drvdata->mem_lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(out_mode);
+
+static ssize_t available_out_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(str_tmc_etr_out_mode); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+				str_tmc_etr_out_mode[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+static DEVICE_ATTR_RO(available_out_modes);
+
 static ssize_t mem_type_show(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -355,6 +451,8 @@
 	&dev_attr_mem_size.attr,
 	&dev_attr_mem_type.attr,
 	&dev_attr_trigger_cntr.attr,
+	&dev_attr_out_mode.attr,
+	&dev_attr_available_out_modes.attr,
 	NULL,
 };
 
@@ -460,6 +558,10 @@
 		desc.ops = &tmc_etr_cs_ops;
 		desc.groups = coresight_tmc_etr_groups;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+
+		ret = tmc_etr_bam_init(adev, drvdata);
+		if (ret)
+			goto out;
 	} else {
 		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
 		desc.ops = &tmc_etf_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 726dcd6..3d6e823 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -19,7 +19,12 @@
 #define _CORESIGHT_TMC_H
 
 #include <linux/miscdevice.h>
+#include <linux/delay.h>
 #include <asm/cacheflush.h>
+#include <linux/of_address.h>
+#include <linux/amba/bus.h>
+#include <linux/usb_bam.h>
+#include <linux/usb/usb_qdss.h>
 
 #define TMC_RSZ			0x004
 #define TMC_STS			0x00c
@@ -77,6 +82,8 @@
 #define TMC_ETR_SG_NXT_TBL(phys_pte)	(((phys_pte >> PAGE_SHIFT) << 4) | 0x3)
 #define TMC_ETR_SG_LST_ENT(phys_pte)	(((phys_pte >> PAGE_SHIFT) << 4) | 0x1)
 
+#define TMC_ETR_BAM_PIPE_INDEX	0
+#define TMC_ETR_BAM_NR_PIPES	2
 
 enum tmc_config_type {
 	TMC_CONFIG_TYPE_ETB,
@@ -107,6 +114,30 @@
 	[TMC_ETR_MEM_TYPE_SG]		= "sg",
 };
 
+enum tmc_etr_out_mode {
+	TMC_ETR_OUT_MODE_NONE,
+	TMC_ETR_OUT_MODE_MEM,
+	TMC_ETR_OUT_MODE_USB,
+};
+
+static const char * const str_tmc_etr_out_mode[] = {
+	[TMC_ETR_OUT_MODE_NONE]		= "none",
+	[TMC_ETR_OUT_MODE_MEM]		= "mem",
+	[TMC_ETR_OUT_MODE_USB]		= "usb",
+};
+
+struct tmc_etr_bam_data {
+	struct sps_bam_props	props;
+	unsigned long		handle;
+	struct sps_pipe		*pipe;
+	struct sps_connect	connect;
+	uint32_t		src_pipe_idx;
+	unsigned long		dest;
+	uint32_t		dest_pipe_idx;
+	struct sps_mem_buffer	desc_fifo;
+	struct sps_mem_buffer	data_fifo;
+	bool			enable;
+};
 
 /**
  * struct tmc_drvdata - specifics associated to an TMC component
@@ -132,6 +163,7 @@
 	struct miscdevice	miscdev;
 	spinlock_t		spinlock;
 	bool			reading;
+	bool			enable;
 	char			*buf;
 	dma_addr_t		paddr;
 	void __iomem		*vaddr;
@@ -147,6 +179,11 @@
 	enum tmc_etr_mem_type	memtype;
 	u32			delta_bottom;
 	int			sg_blk_num;
+	enum tmc_etr_out_mode	out_mode;
+	struct usb_qdss_ch	*usbch;
+	struct tmc_etr_bam_data	*bamdata;
+	bool			enable_to_bam;
+
 };
 
 /* Generic functions */
@@ -166,5 +203,13 @@
 			     char **bufpp, size_t *len);
 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
+void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
+		  struct usb_qdss_ch *ch);
+int tmc_etr_bam_init(struct amba_device *adev,
+		     struct tmc_drvdata *drvdata);
 extern const struct coresight_ops tmc_etr_cs_ops;
 #endif
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 94b2e2f9..e233e76 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -20,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
@@ -940,6 +941,14 @@
 	atomic_t *refcnts = NULL;
 	struct coresight_device *csdev;
 	struct coresight_connection *conns = NULL;
+	struct clk *pclk;
+
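+	/* If an apb_pclk is present, vote it to the dynamic QDSS level */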
+	pclk = clk_get(desc->dev, "apb_pclk");
+	if (!IS_ERR(pclk)) {
+		ret = clk_set_rate(pclk, QDSS_CLK_LEVEL_DYNAMIC);
+		if (ret)
+			dev_err(desc->dev, "clk set rate failed\n");
+	}
 
 	csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
 	if (!csdev) {
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 25eab45..e7b96f1 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -685,6 +685,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
 		},
 	},
+	{
+		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index aded314..8ba6da4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -730,6 +730,7 @@
 {
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
+	struct iommu_group *group;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
@@ -746,7 +747,18 @@
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;
 
-	if (iommu_attach_device(domain, dev))
+	group = dev->iommu_group;
+	if (!group) {
+		dev_err(dev, "No iommu associated with device\n");
+		return -ENODEV;
+	}
+
+	if (iommu_get_domain_for_dev(dev)) {
+		dev_err(dev, "Device already attached to other iommu_domain\n");
+		return -EINVAL;
+	}
+
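+	/* Attach at iommu-group granularity rather than per-device */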
+	if (iommu_attach_group(mapping->domain, group))
 		return -EINVAL;
 
 	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
@@ -781,7 +793,7 @@
 void fast_smmu_detach_device(struct device *dev,
 			     struct dma_iommu_mapping *mapping)
 {
-	iommu_detach_device(mapping->domain, dev);
+	iommu_detach_group(mapping->domain, dev->iommu_group);
 	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 181e889..bea5f03 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -548,7 +548,7 @@
 		}
 	}
 
-	if (iommu_attach_device(domain, dev)) {
+	if (iommu_attach_group(domain, dev->iommu_group)) {
 		seq_puts(s,
 			 "Couldn't attach new domain to device. Is it already attached?\n");
 		goto out_domain_free;
@@ -669,7 +669,7 @@
 	}
 
 out_detach:
-	iommu_detach_device(domain, dev);
+	iommu_detach_group(domain, dev->iommu_group);
 out_domain_free:
 	iommu_domain_free(domain);
 }
@@ -1451,6 +1451,8 @@
 static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
 					int val, bool is_secure)
 {
+	struct iommu_group *group = ddev->dev->iommu_group;
+
 	ddev->domain = iommu_domain_alloc(&platform_bus_type);
 	if (!ddev->domain) {
 		pr_err("Couldn't allocate domain\n");
@@ -1464,8 +1466,8 @@
 		goto out_domain_free;
 	}
 
-	if (iommu_attach_device(ddev->domain, ddev->dev)) {
-		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
+	if (iommu_attach_group(ddev->domain, group)) {
+		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
 		goto out_domain_free;
 	}
 
@@ -1483,6 +1485,8 @@
 					  bool is_secure)
 {
 	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	struct iommu_domain *domain;
 	ssize_t retval;
 	int val;
 
@@ -1494,12 +1498,15 @@
 
 	if (val) {
 		if (ddev->domain) {
-			pr_err("Already attached.\n");
+			pr_err("Iommu-Debug is already attached?\n");
 			retval = -EINVAL;
 			goto out;
 		}
-		if (WARN(ddev->dev->archdata.iommu,
-			 "Attachment tracking out of sync with device\n")) {
+
+		domain = iommu_get_domain_for_dev(dev);
+		if (domain) {
+			pr_err("Another driver is using this device's iommu\n"
+				"Iommu-Debug cannot be used concurrently\n");
 			retval = -EINVAL;
 			goto out;
 		}
@@ -1510,11 +1517,11 @@
 		pr_err("Attached\n");
 	} else {
 		if (!ddev->domain) {
-			pr_err("No domain. Did you already attach?\n");
+			pr_err("Iommu-Debug is not attached?\n");
 			retval = -EINVAL;
 			goto out;
 		}
-		iommu_detach_device(ddev->domain, ddev->dev);
+		iommu_detach_group(ddev->domain, dev->iommu_group);
 		iommu_domain_free(ddev->domain);
 		ddev->domain = NULL;
 		pr_err("Detached\n");
@@ -1566,7 +1573,6 @@
 {
 	return __iommu_debug_attach_write(file, ubuf, count, offset,
 					  true);
-
 }
 
 static const struct file_operations iommu_debug_secure_attach_fops = {
@@ -1868,6 +1874,10 @@
 	if (!of_find_property(dev->of_node, "iommus", NULL))
 		return 0;
 
+	/* Hold a reference count */
+	if (!iommu_group_get(dev))
+		return 0;
+
 	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
 	if (!ddev)
 		return -ENODEV;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 261c125..7f9d9e1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1770,3 +1770,16 @@
 	*id = fwspec->ids[0];
 	return 0;
 }
+
+/*
+ * Until a formal solution for probe deferral becomes part
+ * of the iommu framework...
+ */
+int iommu_is_available(struct device *dev)
+{
+	if (!dev->bus->iommu_ops ||
+		!dev->iommu_fwspec ||
+		!dev->iommu_group)
+		return -EPROBE_DEFER;
+	return 0;
+}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c9281fb..daccf64 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -458,9 +458,6 @@
 				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
 				gic_data_rdist_rd_base() = ptr;
 				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
-				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
-					smp_processor_id(), mpidr, i,
-					&gic_data_rdist()->phys_base);
 				return 0;
 			}
 
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 3060cfa..cb19cef 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -2264,7 +2264,7 @@
 {
 	return platform_driver_register(&qpnp_wled_driver);
 }
-module_init(qpnp_wled_init);
+subsys_initcall(qpnp_wled_init);
 
 static void __exit qpnp_wled_exit(void)
 {
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index dfed3cd..b328a2a 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -134,6 +134,9 @@
 	struct workqueue_struct *wq;
 	struct tcs_response_pool *resp_pool;
 	atomic_t tcs_in_use[MAX_POOL_SIZE];
+	/* Debug info */
+	u64 tcs_last_sent_ts[MAX_POOL_SIZE];
+	u64 tcs_last_recv_ts[MAX_POOL_SIZE];
 	atomic_t tcs_send_count[MAX_POOL_SIZE];
 	atomic_t tcs_irq_count[MAX_POOL_SIZE];
 };
@@ -170,8 +173,9 @@
 	struct tcs_response_pool *pool = drv->resp_pool;
 	struct tcs_response *resp = ERR_PTR(-ENOMEM);
 	int pos;
+	unsigned long flags;
 
-	spin_lock(&pool->lock);
+	spin_lock_irqsave(&pool->lock, flags);
 	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
 	if (pos != MAX_POOL_SIZE) {
 		bitmap_set(pool->avail, pos, 1);
@@ -182,7 +186,7 @@
 		resp->err = err;
 		resp->in_use = false;
 	}
-	spin_unlock(&pool->lock);
+	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return resp;
 }
@@ -190,34 +194,62 @@
 static void free_response(struct tcs_response *resp)
 {
 	struct tcs_response_pool *pool = resp->drv->resp_pool;
+	unsigned long flags;
 
-	spin_lock(&pool->lock);
+	spin_lock_irqsave(&pool->lock, flags);
 	resp->err = -EINVAL;
 	bitmap_clear(pool->avail, resp->idx, 1);
-	spin_unlock(&pool->lock);
+	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
+static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m,
+					bool for_use)
 {
 	struct tcs_response_pool *pool = drv->resp_pool;
 	struct tcs_response *resp = NULL;
 	int pos = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&pool->lock, flags);
 	do {
 		pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
 		if (pos == MAX_POOL_SIZE)
 			break;
+
 		resp = &pool->resp[pos];
 		if (resp->m == m && !resp->in_use) {
-			resp->in_use = true;
+			resp->in_use = for_use;
 			break;
 		}
 		pos++;
+		udelay(1);
 	} while (1);
+	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return resp;
 }
 
+static void print_response(struct tcs_drv *drv, int m)
+{
+	struct tcs_response *resp;
+	struct tcs_mbox_msg *msg;
+	int i;
+
+	resp = get_response(drv, m, false);
+	if (!resp)
+		return;
+
+	msg = resp->msg;
+	pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+			resp->idx, resp->m, resp->in_use);
+	pr_info("Msg: state=%d\n", msg->state);
+	for (i = 0; i < msg->num_payload; i++)
+		pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+				msg->payload[i].addr,
+				msg->payload[i].data,
+				msg->payload[i].complete);
+}
+
 static inline u32 read_drv_config(void __iomem *base)
 {
 	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
@@ -257,17 +289,19 @@
 
 static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
 {
-	struct tcs_mbox *tcs;
+	struct tcs_mbox *tcs = NULL;
 	int i;
 
-	for (i = 0; i < TCS_TYPE_NR; i++) {
+	for (i = 0; i < drv->num_tcs; i++) {
 		tcs = &drv->tcs[i];
-		if (tcs->tcs_mask & BIT(m))
+		if (tcs->tcs_mask & (u32)BIT(m))
 			break;
 	}
 
-	if (i == TCS_TYPE_NR)
+	if (i == drv->num_tcs) {
+		WARN(1, "Incorrect TCS index %d", m);
 		tcs = NULL;
+	}
 
 	return tcs;
 }
@@ -333,6 +367,20 @@
 	tasklet_schedule(&resp->tasklet);
 }
 
+static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
+{
+	void __iomem *base = drv->reg_base;
+	u32 data;
+
+	/* Enable interrupts for non-ACTIVE TCS */
+	data = read_tcs_reg(base, TCS_DRV_IRQ_ENABLE, 0, 0);
+	if (enable)
+		data |= BIT(m);
+	else
+		data &= ~BIT(m);
+	write_tcs_reg(base, TCS_DRV_IRQ_ENABLE, 0, 0, data);
+}
+
 /**
  * tcs_irq_handler: TX Done / Recv data handler
  */
@@ -350,22 +398,15 @@
 	/* Know which TCSes were triggered */
 	irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
 
-	for (m = 0; irq_status >= BIT(m); m++) {
-		if (!(irq_status & BIT(m)))
+	for (m = 0; m < drv->num_tcs; m++) {
+		if (!(irq_status & (u32)BIT(m)))
 			continue;
-
 		atomic_inc(&drv->tcs_irq_count[m]);
 
-		resp = get_response(drv, m);
+		resp = get_response(drv, m, true);
 		if (!resp) {
 			pr_err("No resp request for TCS-%d\n", m);
-			continue;
-		}
-
-		tcs = get_tcs_from_index(drv, m);
-		if (!tcs) {
-			pr_err("TCS-%d doesn't exist in DRV\n", m);
-			continue;
+			goto no_resp;
 		}
 
 		/* Check if all commands were completed */
@@ -391,20 +432,32 @@
 						resp->err);
 
 		/* Clear the AMC mode for non-ACTIVE TCSes */
-		if (tcs->type != ACTIVE_TCS) {
+		tcs = get_tcs_from_index(drv, m);
+		if (tcs && tcs->type != ACTIVE_TCS) {
 			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
 			data &= ~TCS_AMC_MODE_ENABLE;
 			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
+			/*
+			 * Disable interrupt for this TCS to avoid being
+			 * spammed with interrupts coming when the solver
+			 * sends its wake votes.
+			 */
+			enable_tcs_irq(drv, m, false);
 		} else {
 			/* Clear the enable bit for the commands */
 			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
 		}
 
+no_resp:
+		/* Record the recvd time stamp */
+		drv->tcs_last_recv_ts[m] = arch_counter_get_cntvct();
+
 		/* Clear the TCS IRQ status */
 		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
 
 		/* Clean up response object and notify mbox in tasklet */
-		send_tcs_response(resp);
+		if (resp)
+			send_tcs_response(resp);
 
 		/* Notify the client that this request is completed. */
 		atomic_set(&drv->tcs_in_use[m], 0);
@@ -523,9 +576,11 @@
 			continue;
 
 		curr_enabled = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
-		for (j = 0; j < curr_enabled; j++) {
-			if (!(curr_enabled & BIT(j)))
+
+		for (j = 0; j < MAX_CMDS_PER_TCS; j++) {
+			if (!(curr_enabled & (u32)BIT(j)))
 				continue;
+
 			addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, j);
 			for (k = 0; k < msg->num_payload; k++) {
 				if (addr == msg->payload[k].addr)
@@ -607,7 +662,6 @@
 static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
 				bool trigger)
 {
-	const struct device *dev = chan->cl->dev;
 	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
 	int d = drv->drv_id;
 	struct tcs_mbox *tcs;
@@ -626,7 +680,6 @@
 	spin_lock_irqsave(&tcs->tcs_lock, flags);
 	slot = find_slots(tcs, msg);
 	if (slot < 0) {
-		dev_err(dev, "No TCS slot found.\n");
 		spin_unlock_irqrestore(&tcs->tcs_lock, flags);
 		if (resp)
 			free_response(resp);
@@ -659,6 +712,10 @@
 		/* Mark the TCS as busy */
 		atomic_set(&drv->tcs_in_use[m], 1);
 		atomic_inc(&drv->tcs_send_count[m]);
+		/* Enable interrupt for active votes through wake TCS */
+		if (tcs->type != ACTIVE_TCS)
+			enable_tcs_irq(drv, m, true);
+		drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
 	}
 
 	/* Write to the TCS or AMC */
@@ -701,6 +758,24 @@
 	return 0;
 }
 
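+/*
+ * Debug helper: for every TCS still marked in-use, log its send/receive
+ * counters and timestamps and dump the outstanding response object.
+ */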
+static void dump_tcs_stats(struct tcs_drv *drv)
+{
+	int i;
+	unsigned long long curr = arch_counter_get_cntvct();
+
+	for (i = 0; i < drv->num_tcs; i++) {
+		if (!atomic_read(&drv->tcs_in_use[i]))
+			continue;
+		pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+				curr, i,
+				atomic_read(&drv->tcs_send_count[i]),
+				drv->tcs_last_sent_ts[i],
+				atomic_read(&drv->tcs_irq_count[i]),
+				drv->tcs_last_recv_ts[i]);
+		print_response(drv, i);
+	}
+}
+
 /**
  * chan_tcs_write: Validate the incoming message and write to the
  * appropriate TCS block.
@@ -714,35 +789,41 @@
  */
 static int chan_tcs_write(struct mbox_chan *chan, void *data)
 {
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
 	struct tcs_mbox_msg *msg = data;
 	const struct device *dev = chan->cl->dev;
-	int ret = -EINVAL;
+	int ret = 0;
 	int count = 0;
 
 	if (!msg) {
-		dev_err(dev, "Payload error.\n");
+		dev_err(dev, "Payload error\n");
+		ret = -EINVAL;
 		goto tx_fail;
 	}
 
 	if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
-		dev_err(dev, "Payload error.\n");
+		dev_err(dev, "Payload error\n");
+		ret = -EINVAL;
 		goto tx_fail;
 	}
 
 	if (msg->invalidate || msg->is_control) {
-		dev_err(dev, "Incorrect API.\n");
+		dev_err(dev, "Incorrect API\n");
+		ret = -EINVAL;
 		goto tx_fail;
 	}
 
 	if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
 			msg->state != RPMH_AWAKE_STATE) {
-		dev_err(dev, "Incorrect API.\n");
+		dev_err(dev, "Incorrect API\n");
+		ret = -EINVAL;
 		goto tx_fail;
 	}
 
 	/* Read requests should always be single */
 	if (msg->is_read && msg->num_payload > 1) {
-		dev_err(dev, "Incorrect read request.\n");
+		dev_err(dev, "Incorrect read request\n");
+		ret = -EINVAL;
 		goto tx_fail;
 	}
 
@@ -756,25 +837,29 @@
 	/* Post the message to the TCS and trigger */
 	do {
 		ret = tcs_mbox_write(chan, msg, true);
-		if (ret == -EBUSY) {
-			ret = -EIO;
-			udelay(10);
-		} else
+		if (ret != -EBUSY)
 			break;
+		udelay(100);
 	} while (++count < 10);
 
 tx_fail:
-	if (ret) {
-		struct tcs_drv *drv = container_of(chan->mbox,
-					struct tcs_drv, mbox);
+	/* If there was an error in the request, schedule a response */
+	if (ret < 0 && ret != -EBUSY) {
 		struct tcs_response *resp = setup_response(
 				drv, msg, chan, TCS_M_INIT, ret);
 
 		dev_err(dev, "Error sending RPMH message %d\n", ret);
 		send_tcs_response(resp);
+		ret = 0;
 	}
 
-	return 0;
+	/* If we were just busy waiting for TCS, dump the state and return */
+	if (ret == -EBUSY) {
+		dev_err(dev, "TCS Busy, retrying RPMH message send\n");
+		dump_tcs_stats(drv);
+	}
+
+	return ret;
 }
 
 static void __tcs_write_hidden(struct tcs_drv *drv, int d,
@@ -803,7 +888,7 @@
 		return PTR_ERR(tcs);
 
 	if (msg->num_payload != tcs->ncpt) {
-		dev_err(dev, "Request must fit the control TCS size.\n");
+		dev_err(dev, "Request must fit the control TCS size\n");
 		return -EINVAL;
 	}
 
@@ -827,12 +912,12 @@
 	int ret = -EINVAL;
 
 	if (!msg) {
-		dev_err(dev, "Payload error.\n");
+		dev_err(dev, "Payload error\n");
 		goto tx_done;
 	}
 
 	if (msg->num_payload > MAX_RPMH_PAYLOAD) {
-		dev_err(dev, "Payload error.\n");
+		dev_err(dev, "Payload error\n");
 		goto tx_done;
 	}
 
@@ -902,7 +987,6 @@
 	u32 config, max_tcs, ncpt;
 	int tcs_type_count[TCS_TYPE_NR] = { 0 };
 	struct resource *res;
-	u32 irq_mask;
 
 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -1043,14 +1127,9 @@
 	if (ret)
 		return ret;
 
-	/*
-	 * Enable interrupts for AMC TCS,
-	 * if there are no AMC TCS, use wake TCS.
-	 */
-	irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
-				drv->tcs[ACTIVE_TCS].tcs_mask :
-				drv->tcs[WAKE_TCS].tcs_mask;
-	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);
+	/* Enable interrupts for AMC TCS */
+	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
+					drv->tcs[ACTIVE_TCS].tcs_mask);
 
 	for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
 		atomic_set(&drv->tcs_in_use[i], 0);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 89ec6d2..be13ebf 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1847,7 +1847,7 @@
 	if (r)
 		goto out;
 
-	param->data_size = sizeof(*param);
+	param->data_size = offsetof(struct dm_ioctl, data);
 	r = fn(param, input_param_size);
 
 	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e66f404..aac7161 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -69,6 +69,13 @@
 	struct dm_stats_aux stats_aux;
 };
 
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+	if (rq && rq->end_io_data)
+		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+	return NULL;
+}
+
 #define MINOR_ALLOCED ((void *)-1)
 
 /*
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index c897669..c05c069 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -2,3 +2,7 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/Makefile b/drivers/media/platform/msm/camera/cam_cdm/Makefile
new file mode 100644
index 0000000..ad4ec04
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
+				cam_cdm_core_common.o cam_cdm_virtual_core.o \
+				cam_cdm_hw_core.o
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
new file mode 100644
index 0000000..fc7a493
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+
+#ifdef CONFIG_CAM_CDM_DBG
+#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#ifdef CONFIG_CAM_CDM_DUMP_DBG
+#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
+#define CAM_SW_CDM_INDEX                  0
+#define CAM_CDM_INFLIGHT_WORKS            5
+#define CAM_CDM_HW_RESET_TIMEOUT          3000
+
+#define CAM_CDM_HW_ID_MASK      0xF
+#define CAM_CDM_HW_ID_SHIFT     0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+	CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+	CAM_REG_ATTR_READ,
+	CAM_REG_ATTR_WRITE,
+	CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+	CAM_CDM_HW_INTF_CMD_ACQUIRE,
+	CAM_CDM_HW_INTF_CMD_RELEASE,
+	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+	CAM_CDM_HW_INTF_CMD_RESET_HW,
+	CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+	/*cfg_offsets 0*/
+	CDM_CFG_HW_VERSION,
+	CDM_CFG_TITAN_VERSION,
+	CDM_CFG_RST_CMD,
+	CDM_CFG_CGC_CFG,
+	CDM_CFG_CORE_CFG,
+	CDM_CFG_CORE_EN,
+	CDM_CFG_FE_CFG,
+	/*irq_offsets 7*/
+	CDM_IRQ_MASK,
+	CDM_IRQ_CLEAR,
+	CDM_IRQ_CLEAR_CMD,
+	CDM_IRQ_SET,
+	CDM_IRQ_SET_CMD,
+	CDM_IRQ_STATUS,
+	CDM_IRQ_USR_DATA,
+	/*BL FIFO Registers 14*/
+	CDM_BL_FIFO_BASE_REG,
+	CDM_BL_FIFO_LEN_REG,
+	CDM_BL_FIFO_STORE_REG,
+	CDM_BL_FIFO_CFG,
+	CDM_BL_FIFO_RB,
+	CDM_BL_FIFO_BASE_RB,
+	CDM_BL_FIFO_LEN_RB,
+	CDM_BL_FIFO_PENDING_REQ_RB,
+	/*CDM System Debug Registers 22*/
+	CDM_DBG_WAIT_STATUS,
+	CDM_DBG_SCRATCH_0_REG,
+	CDM_DBG_SCRATCH_1_REG,
+	CDM_DBG_SCRATCH_2_REG,
+	CDM_DBG_SCRATCH_3_REG,
+	CDM_DBG_SCRATCH_4_REG,
+	CDM_DBG_SCRATCH_5_REG,
+	CDM_DBG_SCRATCH_6_REG,
+	CDM_DBG_SCRATCH_7_REG,
+	CDM_DBG_LAST_AHB_ADDR,
+	CDM_DBG_LAST_AHB_DATA,
+	CDM_DBG_CORE_DBUG,
+	CDM_DBG_LAST_AHB_ERR_ADDR,
+	CDM_DBG_LAST_AHB_ERR_DATA,
+	CDM_DBG_CURRENT_BL_BASE,
+	CDM_DBG_CURRENT_BL_LEN,
+	CDM_DBG_CURRENT_USED_AHB_BASE,
+	CDM_DBG_DEBUG_STATUS,
+	/*FE Bus MISR Registers 40*/
+	CDM_BUS_MISR_CFG_0,
+	CDM_BUS_MISR_CFG_1,
+	CDM_BUS_MISR_RD_VAL,
+	/*Performance Counter registers 43*/
+	CDM_PERF_MON_CTRL,
+	CDM_PERF_MON_0,
+	CDM_PERF_MON_1,
+	CDM_PERF_MON_2,
+	/*Spare registers 47*/
+	CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+	uint32_t offset;
+	enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+	uint32_t first_offset;
+	uint32_t last_offset;
+	uint32_t reg_count;
+	const struct cam_cdm_reg_offset *offsets;
+	uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - CDM sharing mode flags (shared or private) */
+enum cam_cdm_flags {
+	CAM_CDM_FLAG_SHARED_CDM,
+	CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+	CAM_VIRTUAL_CDM,
+	CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for CAM CDM memory base indexes */
+enum cam_cdm_mem_base_index {
+	CAM_HW_CDM_BASE_INDEX,
+	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+	struct cam_cdm_acquire_data data;
+	void __iomem  *changebase_addr;
+	uint32_t stream_on;
+	uint32_t refcount;
+	struct mutex lock;
+	uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+	CAM_HW_CDM_BL_CB_CLIENT = 1,
+	CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+	uint8_t bl_tag;
+	enum cam_cdm_bl_cb_type request_type;
+	uint32_t client_hdl;
+	void *userdata;
+	uint32_t cookie;
+	struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+	uint32_t handle;
+	struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
+struct cam_cdm_hw_mem {
+	int32_t handle;
+	uint32_t vaddr;
+	uint64_t kmdvaddr;
+	size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+	uint32_t index;
+	char name[128];
+	enum cam_cdm_id id;
+	enum cam_cdm_flags flags;
+	struct completion reset_complete;
+	struct completion bl_complete;
+	struct workqueue_struct *work_queue;
+	struct list_head bl_request_list;
+	struct cam_hw_version version;
+	uint32_t hw_version;
+	uint32_t hw_family_version;
+	struct cam_iommu_handle iommu_hdl;
+	struct cam_cdm_reg_offset_table *offset_tbl;
+	struct cam_cdm_utils_ops *ops;
+	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+	uint8_t bl_tag;
+	atomic_t error;
+	struct cam_cdm_hw_mem gen_irq;
+	uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+	bool dt_cdm_shared;
+	uint32_t dt_num_supported_clients;
+	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+	struct mutex lock;
+	uint32_t refcount;
+	struct cam_hw_intf *device;
+	struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+	bool probe_done;
+	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+	uint32_t cdm_count;
+	uint32_t dt_supported_hw_cdm;
+	int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index);
+
+#endif /* _CAM_CDM_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
new file mode 100644
index 0000000..341406a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -0,0 +1,547 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client get refcount=%d\n",
+		client->refcount);
+	client->refcount++;
+	mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client put refcount=%d\n",
+		client->refcount);
+	if (client->refcount > 0) {
+		client->refcount--;
+	} else {
+		pr_err("Refcount put when zero\n");
+		WARN_ON(1);
+	}
+	mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version)
+{
+	switch (ver) {
+	case CAM_CDM170_VERSION:
+		cam_version->major    = (ver & 0xF0000000) >> 28;
+		cam_version->minor    = (ver & 0xFFF0000) >> 16;
+		cam_version->incr     = (ver & 0xFFFF);
+		cam_version->reserved = 0;
+		return true;
+	default:
+		pr_err("CDM Version=%x not supported in util\n", ver);
+	break;
+	}
+	return false;
+}
+
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+{
+	pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+		evt_data);
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+	if (by_cam_version == false) {
+		switch (ver) {
+		case CAM_CDM170_VERSION:
+			return &CDM170_ops;
+		default:
+			pr_err("CDM Version=%x not supported in util\n", ver);
+		}
+	} else if (cam_version) {
+		if ((cam_version->major == 1) && (cam_version->minor == 0) &&
+			(cam_version->incr == 0))
+			return &CDM170_ops;
+		pr_err("cam_hw_version=%x:%x:%x not supported\n",
+			cam_version->major, cam_version->minor,
+			cam_version->incr);
+	}
+
+	return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+
+	list_for_each_entry(node, bl_list, entry) {
+		if (node->bl_tag == tag)
+			return node;
+	}
+	pr_err("Could not find the bl request for tag=%d\n", tag);
+
+	return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core;
+
+	if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+		(sizeof(struct cam_iommu_handle) == arg_size)) {
+		cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+		*((struct cam_iommu_handle *)get_hw_cap_args) =
+			cdm_core->iommu_hdl;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+	int i;
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (hw->clients[i] == NULL) {
+			CDM_CDBG("Found client slot %d\n", i);
+			return i;
+		}
+	}
+	pr_err("No more client slots\n");
+
+	return -EBUSY;
+}
+
+
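+/*
+ * cam_cdm_notify_clients - dispatch a CDM event to clients
+ *
+ * For a BL-done event the callback goes only to the client that owns
+ * the request (matched by handle); for a page fault the fault address
+ * is broadcast to every registered client.
+ */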
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data)
+{
+	int i;
+	struct cam_cdm *core = NULL;
+	struct cam_cdm_client *client = NULL;
+
+	if (!cdm_hw) {
+		pr_err("CDM Notify called with NULL hw info\n");
+		return;
+	}
+	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		int client_idx;
+		struct cam_cdm_bl_cb_request_entry *node =
+			(struct cam_cdm_bl_cb_request_entry *)data;
+
+		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+		client = core->clients[client_idx];
+		if ((!client) || (client->handle != node->client_hdl)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				node->client_hdl);
+			return;
+		}
+		cam_cdm_get_client_refcount(client);
+		if (client->data.cam_cdm_callback) {
+			CDM_CDBG("Calling client=%s cb cookie=%d\n",
+				client->data.identifier, node->cookie);
+			client->data.cam_cdm_callback(node->client_hdl,
+				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+				node->cookie);
+			CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+		} else {
+			pr_err("No cb registered for client hdl=%x\n",
+				node->client_hdl);
+		}
+		cam_cdm_put_client_refcount(client);
+		return;
+	}
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (core->clients[i] != NULL) {
+			client = core->clients[i];
+			mutex_lock(&client->lock);
+			CDM_CDBG("Found client slot %d\n", i);
+			if (client->data.cam_cdm_callback) {
+				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+					unsigned long iova =
+						(unsigned long)data;
+
+					client->data.cam_cdm_callback(
+						client->handle,
+						client->data.userdata,
+						CAM_CDM_CB_STATUS_PAGEFAULT,
+						(iova & 0xFFFFFFFF));
+				}
+			} else {
+				pr_err("No cb registered for client hdl=%x\n",
+					client->handle);
+			}
+			mutex_unlock(&client->lock);
+		}
+	}
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+	void *start_args, bool operation)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *core = NULL;
+	int rc = -1;
+	int client_idx;
+	struct cam_cdm_client *client;
+	uint32_t *handle = start_args;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		pr_err("client id given handle=%x invalid\n", *handle);
+		cam_cdm_put_client_refcount(client);
+		return -EINVAL;
+	}
+	if (operation == true) {
+		if (true == client->stream_on) {
+			pr_err("Invalid CDM client is already streamed ON\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	} else {
+		if (client->stream_on == false) {
+			pr_err("Invalid CDM client is already streamed Off\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			struct cam_ahb_vote ahb_vote;
+			struct cam_axi_vote axi_vote;
+
+			ahb_vote.type = CAM_VOTE_ABSOLUTE;
+			ahb_vote.vote.level = CAM_SVS_VOTE;
+			axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+			rc = cam_cpas_start(core->cpas_handle,
+				&ahb_vote, &axi_vote);
+			if (rc != 0) {
+				pr_err("CPAS start failed\n");
+				goto end;
+			}
+			CDM_CDBG("CDM init first time\n");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CDM_CDBG("Virtual CDM HW init first time\n");
+				rc = 0;
+			} else {
+				CDM_CDBG("CDM HW init first time\n");
+				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+				if (rc == 0) {
+					rc = cam_hw_cdm_alloc_genirq_mem(
+						hw_priv);
+					if (rc != 0) {
+						pr_err("Genirqalloc failed\n");
+						cam_hw_cdm_deinit(hw_priv,
+							NULL, 0);
+					}
+				} else {
+					pr_err("CDM HW init failed\n");
+				}
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					pr_err("CPAS stop failed\n");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CDM_CDBG("CDM HW already ON count=%d\n",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+			if (!cdm_hw->open_count) {
+				CDM_CDBG("CDM Deinit now\n");
+				if (core->id == CAM_CDM_VIRTUAL) {
+					CDM_CDBG("Virtual CDM HW Deinit\n");
+					rc = 0;
+				} else {
+					CDM_CDBG("CDM HW Deinit now\n");
+					rc = cam_hw_cdm_deinit(
+						hw_priv, NULL, 0);
+					if (cam_hw_cdm_release_genirq_mem(
+						hw_priv))
+						pr_err("Genirq release failed\n");
+				}
+				if (rc) {
+					pr_err("Deinit failed in streamoff\n");
+				} else {
+					client->stream_on = false;
+					rc = cam_cpas_stop(core->cpas_handle);
+					if (rc)
+						pr_err("CPAS stop failed\n");
+				}
+			} else {
+				client->stream_on = false;
+				CDM_CDBG("Client stream off success =%d\n",
+					cdm_hw->open_count);
+			}
+		} else {
+			CDM_CDBG("stream OFF CDM Invalid %d\n",
+				cdm_hw->open_count);
+			rc = -ENXIO;
+		}
+	}
+end:
+	cam_cdm_put_client_refcount(client);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+	return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+	return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+	uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_data = NULL;
+	struct cam_cdm *core = NULL;
+	int rc = -EINVAL;
+
+	if ((!hw_priv) || (!cmd_args) ||
+		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+		return rc;
+
+	soc_data = &cdm_hw->soc_info;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	switch (cmd) {
+	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+		struct cam_cdm_hw_intf_cmd_submit_bl *req;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+		if ((req->data->type < 0) ||
+			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+			pr_err("Invalid req bl cmd addr type=%d\n",
+				req->data->type);
+			break;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+		client = core->clients[idx];
+		if ((!client) || (req->handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				req->handle);
+			break;
+		}
+		cam_cdm_get_client_refcount(client);
+		if ((req->data->flag == true) &&
+			(!client->data.cam_cdm_callback)) {
+			pr_err("CDM request cb without registering cb\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (client->stream_on != true) {
+			pr_err("Invalid CDM needs to be streamed ON first\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (core->id == CAM_CDM_VIRTUAL)
+			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+		else
+			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+		cam_cdm_put_client_refcount(client);
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+		struct cam_cdm_acquire_data *data;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+
+		mutex_lock(&cdm_hw->hw_mutex);
+		data = (struct cam_cdm_acquire_data *)cmd_args;
+		CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+			data->identifier, core->index);
+		idx = cam_cdm_find_free_client_slot(core);
+		if ((idx < 0) || (core->clients[idx])) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			pr_err("Failed to client slots for client=%s in hw idx=%d\n",
+			data->identifier, core->index);
+			break;
+		}
+		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+			GFP_KERNEL);
+		if (!core->clients[idx]) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			rc = -ENOMEM;
+			break;
+		}
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		mutex_init(&client->lock);
+		data->ops = core->ops;
+		if (core->id == CAM_CDM_VIRTUAL) {
+			data->cdm_version.major = 1;
+			data->cdm_version.minor = 0;
+			data->cdm_version.incr = 0;
+			data->cdm_version.reserved = 0;
+			data->ops = cam_cdm_get_ops(0,
+					&data->cdm_version, true);
+			if (!data->ops) {
+				mutex_destroy(&client->lock);
+				mutex_lock(&cdm_hw->hw_mutex);
+				kfree(core->clients[idx]);
+				core->clients[idx] = NULL;
+				mutex_unlock(&cdm_hw->hw_mutex);
+				rc = -1;
+				break;
+			}
+		} else {
+			data->cdm_version = core->version;
+		}
+
+		cam_cdm_get_client_refcount(client);
+		mutex_lock(&client->lock);
+		memcpy(&client->data, data,
+			sizeof(struct cam_cdm_acquire_data));
+		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+					core->index,
+					idx);
+		client->stream_on = false;
+		data->handle = client->handle;
+		CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+			data->identifier, core->index);
+		mutex_unlock(&client->lock);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RELEASE: {
+		uint32_t *handle = cmd_args;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+				cmd, arg_size, *handle);
+			return -EINVAL;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+		mutex_lock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		if ((!client) || (*handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		cam_cdm_put_client_refcount(client);
+		mutex_lock(&client->lock);
+		if (client->refcount != 0) {
+			pr_err("CDM Client refcount not zero %d",
+				client->refcount);
+			rc = -1;
+			mutex_unlock(&client->lock);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		core->clients[idx] = NULL;
+		mutex_unlock(&client->lock);
+		mutex_destroy(&client->lock);
+		kfree(client);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+		pr_err("CDM HW reset not supported for handle =%x\n",
+			*((uint32_t *)cmd_args));
+		break;
+	}
+	default:
+		pr_err("CDM HW intf command not valid =%d\n", cmd);
+		break;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
new file mode 100644
index 0000000..eb75aaa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+	bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+	uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version);
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
new file mode 100644
index 0000000..7f2c455
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -0,0 +1,1025 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <media/cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants */
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
+		return CAM_CDM_CPAS_0;
+
+	return CAM_CDM_MAX;
+}
+
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	int rc = 0;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+		pending_bl)) {
+		pr_err("Failed to read CDM pending BL's\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("dump core en=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	pr_err("dump scratch0=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	pr_err("dump scratch1=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	pr_err("dump scratch2=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	pr_err("dump scratch3=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	pr_err("dump scratch4=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	pr_err("dump scratch5=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	pr_err("dump scratch6=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	pr_err("dump scratch7=%x\n", dump_reg);
+
+}
+
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("CDM HW core status=%x\n", dump_reg);
+	/* First pause CDM */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		pr_err("AHB dump reglastdata=%x\n", dump_reg);
+	} else {
+		pr_err("CDM HW AHB dump not enable\n");
+	}
+
+	if (core_dbg & 0x10000) {
+		int i;
+
+		pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			pr_err("BL(%d) len=%d tag=%d\n", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		pr_err("CDM HW BL FIFO readback not enable\n");
+	}
+
+	pr_err("CDM HW default dump\n");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	pr_err("CDM HW core cfg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	pr_err("CDM HW irq status=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	pr_err("CDM HW irq set reg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	pr_err("CDM HW current BL base=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
+		(dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	pr_err("CDM HW current AHB base=%x\n", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+
+	/* Enable CDM back */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+}
+
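+/*
+ * Waits for space in the CDM BL FIFO before more BLs are written.
+ * Returns a non-negative count that the caller uses as the number of BL
+ * writes it may issue before polling the FIFO depth again, or a negative
+ * value on a register read failure or a completion wait timeout.
+ */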
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -1;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			pr_err("Failed to read CDM pending BL's\n");
+			rc = -1;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			pr_err("Invalid available slots %d:%d:%d\n",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		if (bl_count < (available_bl_slots - 1)) {
+			CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+				(available_bl_slots - 1), bl_count);
+			rc = bl_count;
+			break;
+		} else if (0 == (available_bl_slots - 1)) {
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				pr_err("CDM HW BL Wait timed out failed\n");
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			CDM_CDBG("CDM HW is ready for data\n");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		pr_err("Failed to write CDM base to BL base\n");
+		return true;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
+		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
+		pr_err("Failed to write CDM BL len\n");
+		return true;
+	}
+	return false;
+}
+
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		pr_err("Failed to write CDM commit BL\n");
+		return true;
+	}
+	return false;
+}
+
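+/*
+ * Submits an internally generated BL that raises an inline IRQ carrying the
+ * current bl_tag. The IRQ worker later matches this tag against
+ * bl_request_list and invokes the client callback once all BLs queued
+ * before it have been consumed by the hardware.
+ */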
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	if (core->bl_tag > 63) {
+		pr_err("bl_tag invalid =%d\n", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
+		core->bl_tag, req->data->cmd_arrary_count);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+			GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		pr_err("Cannot commit the genirq BL with tag tag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+	}
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		pr_info("requested BL more than max size, cnt=%d max=%d\n",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	if (atomic_read(&core->error) != 0) {
+		pr_err("HW in error state, cannot trigger transactions now\n");
+		return rc;
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		pr_err("Cannot read the current BL depth\n");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
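+	/*
+	 * Write each BL into the hardware FIFO: throttle on available FIFO
+	 * slots, translate mem handles to IOVAs where needed, and commit one
+	 * entry per command. A gen-irq BL is appended after the last command
+	 * when the client asked for a completion callback.
+	 */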
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (atomic_read(&core->error) != 0) {
+			pr_err("HW in error state cmd_count=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				pr_err("wait for bl fifo failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				pr_err("Hw bl hw_iova is invalid %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			pr_err("Only mem hdl/hw va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("Got the HW VA\n");
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				pr_err("Hw bl write failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for %d:%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+
+		if (!rc) {
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL\n");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				pr_err("Cannot commit the BL %d tag=%d\n",
+					i, (core->bl_tag - 1));
+				rc = -1;
+				break;
+			}
+			CDM_CDBG("BL commit success BL %d tag=%d\n", i,
+				(core->bl_tag - 1));
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count -
+				1))) {
+				rc = cam_hw_cdm_submit_gen_irq(
+					cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+			if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
+				core->bl_tag))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+
+}
+
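+/*
+ * Workqueue bottom half for the CDM IRQ: handles inline (gen-irq) BL
+ * completions by notifying the matching client, completes reset and
+ * BL-done waiters, and dumps the core debug registers on error interrupts.
+ */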
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("inline IRQ data=%x\n",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+					payload->irq_data,
+					&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+						CAM_HW_CDM_BL_CB_INTERNAL) {
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			pr_err("CDM HW BL done IRQ\n");
+			complete(&core->bl_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			pr_err("Invalid command IRQ, Need HW reset\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			pr_err("AHB IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			pr_err("Overflow IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		kfree(payload);
+	} else {
+		pr_err("NULL payload\n");
+	}
+
+}
+
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_cdm *core = NULL;
+
+	if (token) {
+		cdm_hw = (struct cam_hw_info *)token;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		atomic_inc(&core->error);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		pr_err("Page fault iova addr %pK\n", (void *)iova);
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+			(void *)iova);
+		atomic_dec(&core->error);
+	} else {
+		pr_err("Invalid token\n");
+	}
+
+}
+
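+/*
+ * Hard IRQ handler: snapshots and clears the IRQ status (and user data for
+ * inline IRQs) using an atomic allocation, then defers all processing to
+ * cam_hw_cdm_work on the CDM workqueue.
+ */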
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CDM_CDBG("Got irq\n");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			pr_err("Failed to read CDM HW IRQ status\n");
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				pr_err("Failed to read CDM HW IRQ data\n");
+			}
+		}
+		CDM_CDBG("Got payload=%d\n", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			pr_err("Failed to Write CDM HW IRQ Clear\n");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			pr_err("Failed to Write CDM HW IRQ cmd\n");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			pr_err("Failed to queue work for irq=%x\n",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
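+/*
+ * Reserves a small IO-mapped buffer (8 bytes per BL FIFO entry) that holds
+ * the gen-irq commands written by cam_hw_cdm_submit_gen_irq().
+ */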
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = 0;
+	genirq_alloc_cmd.region = CAM_MEM_MGR_REGION_NON_SECURE_IO;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		pr_err("Failed to put genirq cmd space for hw\n");
+
+	return rc;
+}
+
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("Enable platform failed\n");
+		goto end;
+	}
+
+	CDM_CDBG("Enable soc done\n");
+
+	/* Before triggering the reset to HW, clear the reset complete */
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		pr_err("Failed to Write CDM HW IRQ mask\n");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		pr_err("Failed to Write CDM HW reset\n");
+		goto disable_return;
+	}
+
+	CDM_CDBG("Waiting for CDM HW resetdone\n");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+			msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+	if (time_left <= 0) {
+		pr_err("CDM HW reset Wait failed rc=%d\n", rc);
+		goto disable_return;
+	} else {
+		CDM_CDBG("CDM Init success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+		cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
+		rc = 0;
+		goto end;
+	}
+
+disable_return:
+	rc = -1;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("disable platform failed\n");
+	} else {
+		CDM_CDBG("CDM Deinit success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	atomic_set(&cdm_core->error, 0);
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		pr_err("cpas-cdm get iommu handle failed\n");
+		goto unlock_release_mem;
+	}
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		pr_err("Attach iommu non secure handle failed\n");
+		goto destroy_non_secure_hdl;
+	}
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+			cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		pr_err("Failed to request platform resource\n");
+		goto destroy_non_secure_hdl;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("Virtual CDM CPAS registration failed\n");
+		goto release_platform_resource;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Init CDM HW\n");
+		goto init_failed;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		pr_err("Failed to read CDM HW Version\n");
+		goto deinit;
+	}
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_family_version)) {
+		pr_err("Failed to read CDM family Version\n");
+		goto deinit;
+	}
+
+	CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		pr_err("Failed to util ops for hw\n");
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		pr_err("Failed to set cam he version for hw\n");
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Deinit CDM HW\n");
+		goto release_platform_resource;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("HW CDM Interface registration failed\n");
+		goto release_platform_resource;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		pr_err("Deinit failed for hw\n");
+	cdm_hw->open_count--;
+init_failed:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		pr_err("CPAS unregister failed\n");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu secure hdl failed\n");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get hw private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get hw core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Deinit failed for hw\n");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu secure hdl failed\n");
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+	},
+};
+
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
new file mode 100644
index 0000000..b1b2117
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -0,0 +1,569 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+	{ .compatible = "qcom,cam-cdm-intf", },
+	{}
+};
+
+static int get_cdm_mgr_refcount(void)
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+		rc = -1;
+	} else {
+		CDM_CDBG("CDM intf mgr get refcount=%d\n",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static void put_cdm_mgr_refcount(void)
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+	} else {
+		CDM_CDBG("CDM intf mgr put refcount=%d\n",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			pr_err("Refcount put when zero\n");
+			WARN_ON(1);
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
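+/*
+ * Resolves a client identifier string to the index of the CDM node whose
+ * dt "cdm-client-names" list contains that name.
+ */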
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index)
+{
+	int rc = -1, i, j;
+	char client_name[128];
+
+	CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+		identifier, cell_index);
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CDM_CDBG("dt_num_supported_clients=%d\n",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients);
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CDM_CDBG("client name:%s\n",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i;
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles)
+{
+	int i, j, rc = -1;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+	CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		if (!cdm_mgr.nodes[i].data) {
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			 cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->identifier) || (!data->base_array) ||
+		(!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported\n");
+		rc = -1;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		pr_err("Failed to identify associated hw id\n");
+		goto end;
+	} else {
+		CDM_CDBG("hw_index:%d\n", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+					sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				pr_err("CDM hw acquire failed\n");
+				goto end;
+			}
+		} else {
+			pr_err("idx %d doesn't have acquire ops\n", hw_index);
+			rc = -1;
+		}
+	}
+end:
+	if (rc < 0) {
+		pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+int cam_cdm_release(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				pr_err("hw release failed for handle=%x\n",
+					handle);
+		} else
+			pr_err("hw idx %d doesn't have release ops\n",
+				hw_index);
+	}
+	put_cdm_mgr_refcount();
+	if (rc == 0)
+		put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				pr_err("hw submit bl failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have submit ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+int cam_cdm_stream_on(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw start failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have start ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+int cam_cdm_stream_off(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw stop failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have stop ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+int cam_cdm_reset_hw(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				pr_err("CDM hw release failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have release ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
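+/*
+ * Registers a CDM with the interface manager: the virtual CDM always takes
+ * the reserved CAM_SW_CDM_INDEX slot, while hardware CDMs are appended at
+ * the current cdm_count.
+ */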
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		pr_err("CDM registration failed type=%d count=%d\n",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index)
+{
+	int rc = -1;
+
+	if ((!hw) || (!data))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		pr_err("CDM Deregistration failed type=%d index=%d\n",
+			type, index);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+static int cam_cdm_intf_probe(struct platform_device *pdev)
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = true;
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) {
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				pr_err("Valid node present in index=%d\n", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	return rc;
+}
+
+static int cam_cdm_intf_remove(struct platform_device *pdev)
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		pr_err("Virtual CDM remove failed\n");
+		return rc;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		pr_err("cdm manger refcount not zero %d\n",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			pr_err("Valid node present in index=%d\n", i);
+			mutex_unlock(&cam_cdm_mgr_lock);
+			goto end;
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static struct platform_driver cam_cdm_intf_driver = {
+	.probe = cam_cdm_intf_probe,
+	.remove = cam_cdm_intf_remove,
+	.driver = {
+	.name = "msm_cam_cdm_intf",
+	.owner = THIS_MODULE,
+	.of_match_table = msm_cam_cdm_intf_dt_match,
+	},
+};
+
+static int __init cam_cdm_intf_init_module(void)
+{
+	return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+static void __exit cam_cdm_intf_exit_module(void)
+{
+	platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
new file mode 100644
index 0000000..66c75f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum for possible CAM CDM hardwares */
+enum cam_cdm_id {
+	CAM_CDM_VIRTUAL,
+	CAM_CDM_HW_ANY,
+	CAM_CDM_CPAS_0,
+	CAM_CDM_IPE0,
+	CAM_CDM_IPE1,
+	CAM_CDM_BPS,
+	CAM_CDM_VFE,
+	CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback */
+enum cam_cdm_cb_status {
+	CAM_CDM_CB_STATUS_BL_SUCCESS,
+	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+	CAM_CDM_CB_STATUS_PAGEFAULT,
+	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+	CAM_CDM_CB_STATUS_HW_RESET_DONE,
+	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
+enum cam_cdm_bl_cmd_addr_type {
+	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ *                    like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ *                     of the device. This can be used to form a unique string
+ *                     with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ *             of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ *                     callbacks from CDM driver
+ *                     @handle : CDM Client handle
+ *                     @userdata : Private data given at the time of acquire
+ *                     @status : Callback status
+ *                     @cookie : Cookie of the bl request when the callback
+ *                                is for a gen irq status
+ * @base_array_cnt : Input number of ioremapped address pairs pointed to
+ *                   by base_array, needed only if the selected cdm is virtual.
+ * @base_array : Input pointer to the ioremapped address pair array,
+ *               needed only if the selected cdm is virtual.
+ * @cdm_version : CDM version is output while acquiring a HW cdm and is
+ *                input while acquiring a virtual cdm; currently fixed to
+ *                one version below the acquire API.
+ * @ops : Output pointer updated by cdm driver to the CDM
+ *                     util ops for this HW version of CDM acquired.
+ * @handle  : Output Unique handle generated for this acquire
+ *
+ */
+struct cam_cdm_acquire_data {
+	char identifier[128];
+	uint32_t cell_index;
+	enum cam_cdm_id id;
+	void *userdata;
+	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+		enum cam_cdm_cb_status status, uint32_t cookie);
+	uint32_t base_array_cnt;
+	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+	struct cam_hw_version cdm_version;
+	struct cam_cdm_utils_ops *ops;
+	uint32_t handle;
+};
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ *           by mem_handle
+ * @len : Input length of the BL command, cannot be more than 1MB and
+ *           will be validated against offset+size of the memory pointed
+ *           to by mem_handle
+ *
+ */
+struct cam_cdm_bl_cmd {
+	union {
+		int32_t mem_handle;
+		uint32_t *hw_iova;
+		void *kernel_iova;
+	} bl_addr;
+	uint32_t  offset;
+	uint32_t  len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 for callback needed and 0 for no callback when this BL
+ *            request is done
+ * @userdata : Input private data which will be returned as part of the
+ *             callback if one was requested for this bl request via @flag.
+ * @cookie : Cookie returned in the callback for this bl request
+ * @type : Type of the submitted bl cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @cmd              : Input payload holding the array of BL commands
+ *                     to be submitted.
+ *
+ */
+struct cam_cdm_bl_request {
+	int flag;
+	void *userdata;
+	uint32_t cookie;
+	enum cam_cdm_bl_cmd_addr_type type;
+	uint32_t cmd_arrary_count;
+	struct cam_cdm_bl_cmd cmd[1];
+};
+
+/**
+ * @brief : API to get the CDM capabilities for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ *                   like vfe, ife, jpeg etc. The cell index is not needed,
+ *                   assuming all devices of a single type map to one SMMU
+ *                   client
+ * @cdm_handles : Input iommu handle memory pointer to update handles
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BLs need to be submitted.
+ * @data   : Input pointer to the BLs to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
+
+/**
+ * @brief : API to stream ON a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed ON
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed OFF
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset a previously acquired CDM,
+ *          this can be performed only when the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
+
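+/*
+ * Illustrative (untested) client usage sketch for the APIs above; the
+ * "my_" names, identifier string and error handling are hypothetical and
+ * not part of this driver:
+ *
+ *	struct cam_cdm_acquire_data acq = {0};
+ *
+ *	strlcpy(acq.identifier, "vfe", sizeof(acq.identifier));
+ *	acq.cell_index = 0;
+ *	acq.id = CAM_CDM_HW_ANY;
+ *	acq.cam_cdm_callback = my_cdm_cb;	// hypothetical client callback
+ *	acq.base_array_cnt = 1;
+ *	acq.base_array[0] = &my_soc_info.reg_map[0]; // client's mapped block
+ *	if (!cam_cdm_acquire(&acq) && !cam_cdm_stream_on(acq.handle)) {
+ *		cam_cdm_submit_bls(acq.handle, my_bl_req); // client-built BLs
+ *		cam_cdm_stream_off(acq.handle);
+ *		cam_cdm_release(acq.handle);
+ *	}
+ */
+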
+#endif /* _CAM_CDM_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
new file mode 100644
index 0000000..0f5458c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
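+/*
+ * The register accessors below validate the requested offset against both
+ * the per-SoC offset table and the ioremapped region length before touching
+ * the hardware; they return true on failure and false on success.
+ */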
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+	CDM_CDBG("E: b=%pK reg=%x off=%x\n", (void *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+		if (reg_addr > (base + mem_len)) {
+			pr_err("accessing invalid mapped region %d\n", reg);
+			goto permission_error;
+		}
+		*value = cam_io_r_mb(reg_addr);
+		CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
+			(void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)),	*value);
+		return false;
+	}
+permission_error:
+	*value = 0;
+	return true;
+
+}
+
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg));
+		if (reg_addr > (base + mem_len)) {
+			pr_err("Accessing invalid region %d:%d\n",
+				reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+			goto permission_error;
+		}
+		cam_io_w_mb(value, reg_addr);
+		return false;
+	}
+permission_error:
+	return true;
+
+}
+
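+/*
+ * Parses the per-CDM private dt data: the optional "cdm-client-names" list
+ * decides whether this CDM is shared and which clients may look it up by
+ * name through the interface manager.
+ */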
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr)
+{
+	int i, rc = -1;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+						pdev->dev.of_node,
+						"cdm-client-names");
+	CDM_CDBG("Num supported cdm_client = %d\n",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		pr_err("Invalid count of client names count=%d\n",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (ptr->dt_num_supported_clients < 0) {
+		CDM_CDBG("No cdm client names found\n");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+	} else {
+		ptr->dt_cdm_shared = true;
+	}
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CDM_CDBG("cdm-client-names[%d] = %s\n",	i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			pr_err("Reading cdm-client-names failed\n");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table)
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	if (!cdm_hw  || (cdm_hw->soc_info.soc_private)
+		|| !(cdm_hw->soc_info.pdev))
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		pr_err("Failed to retrieve the CDM dt properties\n");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+				sizeof(struct cam_cdm_private_dt_data),
+				GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			pr_err("Failed to load CDM dt private data\n");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) {
+			pr_err("Failed to retrieve the CDM id table\n");
+			goto error;
+		}
+		CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -1;
+	kfree(soc_ptr->soc_private);
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
new file mode 100644
index 0000000..765aba4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev,
+	struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
new file mode 100644
index 0000000..034c782
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -0,0 +1,571 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
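+/*
+ * Command header sizes in 32-bit DWORDs, indexed by enum cam_cdm_command.
+ * cam_cdm_data_alignement_check() below keeps these in sync with the
+ * packed command structures.
+ */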
+static unsigned int CDMCmdHeaderSizes[
+	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
+	0, /* UNUSED*/
+	3, /* DMI*/
+	0, /* UNUSED*/
+	2, /* RegContinuous*/
+	1, /* RegRandom*/
+	2, /* BUFFER_INDIRECT*/
+	2, /* GenerateIRQ*/
+	3, /* WaitForEvent*/
+	1, /* ChangeBase*/
+	1, /* PERF_CTRL*/
+	3, /* DMI32*/
+	3, /* DMI64*/
+};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+	unsigned int count    : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+	unsigned int count     : 16;
+	unsigned int reserved0 : 8;
+	unsigned int cmd       : 8;
+	unsigned int offset    : 24;
+	unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr:  Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+	unsigned int length     : 16;
+	unsigned int reserved   : 8;
+	unsigned int cmd        : 8;
+	unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+	unsigned int base   : 24;
+	unsigned int cmd    : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+	unsigned int mask             : 8;
+	unsigned int id               : 8;
+	unsigned int iw_reserved      : 7;
+	unsigned int iw               : 1;
+	unsigned int cmd              : 8;
+	unsigned int offset           : 24;
+	unsigned int offset_reserved  : 8;
+	unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM generate IRQ command.
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+	unsigned int reserved   : 24;
+	unsigned int cmd        : 8;
+	unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd - Definition for a CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+	unsigned int perf     : 2;
+	unsigned int reserved : 22;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+	return CDMCmdHeaderSizes[command];
+}
+
+uint32_t cdm_required_size_reg_continuous(uint32_t  numVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
+}
+
+uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
+		(2 * numRegVals);
+}
+
+uint32_t cdm_required_size_dmi(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+uint32_t cdm_required_size_genirq(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
+}
+
+uint32_t cdm_required_size_indirect(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+uint32_t cdm_required_size_changebase(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+uint32_t cdm_offsetof_dmi_addr(void)
+{
+	return offsetof(struct cdm_dmi_cmd, addr);
+}
+
+uint32_t cdm_offsetof_indirect_addr(void)
+{
+	return offsetof(struct cdm_indirect_cmd, addr);
+}
+
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+	uint32_t numVals, uint32_t *pVals)
+{
+	uint32_t i;
+	struct cdm_regcontinuous_cmd *pHeader =
+		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
+
+	pHeader->count = numVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_CONT;
+	pHeader->reserved0 = 0;
+	pHeader->reserved1 = 0;
+	pHeader->offset = reg;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+
+	for (i = 0; i < numVals; i++)
+		(((uint32_t *)pCmdBuffer)[i]) = (((uint32_t *)pVals)[i]);
+
+	pCmdBuffer += numVals;
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+	uint32_t *pRegVals)
+{
+	uint32_t i;
+	uint32_t *dst, *src;
+	struct cdm_regrandom_cmd *pHeader =
+		(struct cdm_regrandom_cmd *)pCmdBuffer;
+
+	pHeader->count = numRegVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+	pHeader->reserved = 0;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	dst = pCmdBuffer;
+	src = pRegVals;
+	for (i = 0; i < numRegVals; i++) {
+		*dst++ = *src++;
+		*dst++ = *src++;
+	}
+
+	return dst;
+}
+
+uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+	uint32_t length)
+{
+	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;
+
+	pHeader->cmd        = dmiCmd;
+	pHeader->addr = dmiBufferAddr;
+	pHeader->length = length - 1;
+	pHeader->DMIAddr = DMIAddr;
+	pHeader->DMISel = DMISel;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
+	uint32_t length)
+{
+	struct cdm_indirect_cmd *pHeader =
+		(struct cdm_indirect_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
+	pHeader->addr = indirectBufAddr;
+	pHeader->length = length - 1;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+	struct cdm_changebase_cmd *pHeader =
+		(struct cdm_changebase_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
+	pHeader->base = base;
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+
+	return pCmdBuffer;
+}
+
+void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
+{
+	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
+	pHeader->userdata = userdata;
+}
+
+struct cam_cdm_utils_ops CDM170_ops = {
+	cdm_get_cmd_header_size,
+	cdm_required_size_reg_continuous,
+	cdm_required_size_reg_random,
+	cdm_required_size_dmi,
+	cdm_required_size_genirq,
+	cdm_required_size_indirect,
+	cdm_required_size_changebase,
+	cdm_offsetof_dmi_addr,
+	cdm_offsetof_indirect_addr,
+	cdm_write_regcontinuous,
+	cdm_write_regrandom,
+	cdm_write_dmi,
+	cdm_write_indirect,
+	cdm_write_changebase,
+	cdm_write_genirq,
+};
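+
+/*
+ * Illustrative sketch (not part of this change) of how a client is
+ * expected to use the ops table above; buf, hw_base and reg_off are
+ * placeholders, and buf must hold at least "len" DWORDs:
+ *
+ *	uint32_t vals[2] = { 0x10, 0x20 };
+ *	uint32_t len = CDM170_ops.cdm_required_size_changebase() +
+ *		CDM170_ops.cdm_required_size_reg_continuous(2);
+ *	uint32_t *cur = buf;
+ *	cur = CDM170_ops.cdm_write_changebase(cur, hw_base);
+ *	cur = CDM170_ops.cdm_write_regcontinuous(cur, reg_off, 2, vals);
+ */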
+
+void cam_cdm_data_alignement_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI)));
+	BUILD_BUG_ON(sizeof(struct cdm_regcontinuous_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)));
+	BUILD_BUG_ON(sizeof(struct cdm_regrandom_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+	BUILD_BUG_ON(sizeof(struct cdm_indirect_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT)));
+	BUILD_BUG_ON(sizeof(struct cdm_genirq_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ)));
+	BUILD_BUG_ON(sizeof(struct cdm_wait_event_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT)));
+	BUILD_BUG_ON(sizeof(struct cdm_changebase_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE)));
+	BUILD_BUG_ON(sizeof(struct  cdm_perf_ctrl_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_PERF_CTRL)));
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_32)));
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_64)));
+}
+
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+	uint32_t base_array_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	void __iomem **device_base)
+{
+	int ret = -1, i;
+
+	for (i = 0; i < base_array_size; i++) {
+		if (base_table[i])
+			CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+			i, (base_table[i])->mem_cam_base, hw_base);
+		if ((base_table[i]) &&
+			((base_table[i])->mem_cam_base == hw_base)) {
+			*device_base = (base_table[i])->mem_base;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	int ret = 0;
+	uint32_t *data;
+	struct cdm_regcontinuous_cmd *reg_cont;
+
+	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
+		(!base_addr)) {
+		pr_err("invalid base addr and data length %d %pK\n",
+			cmd_buf_size, base_addr);
+		return -EINVAL;
+	}
+
+	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
+	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
+		(((reg_cont->count * sizeof(uint32_t)) +
+			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
+			cmd_buf_size)) {
+		pr_err("buffer size %d is not sufficient for count %d\n",
+			cmd_buf_size, reg_cont->count);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	cam_io_memcpy(base_addr + reg_cont->offset, data,
+		reg_cont->count * sizeof(uint32_t));
+
+	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_regrandom_cmd *reg_random;
+	uint32_t *data;
+
+	if (!base_addr) {
+		pr_err("invalid base address\n");
+		return -EINVAL;
+	}
+
+	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
+	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
+		(((reg_random->count * (sizeof(uint32_t) * 2)) +
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
+			cmd_buf_size)) {
+		pr_err("invalid reg_count %d cmd_buf_size %d\n",
+			reg_random->count, cmd_buf_size);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+
+	for (i = 0; i < reg_random->count; i++) {
+		CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
+			((uint64_t) base_addr + data[0]), data[1]);
+		cam_io_w(data[1], base_addr + data[0]);
+		data += 2;
+	}
+
+	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+
+	return 0;
+}
+
+static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
+	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_dmi_cmd *swd_dmi;
+	uint32_t *data;
+
+	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
+
+	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
+		pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_required_size_dmi();
+
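+	/*
+	 * swd_dmi->length holds the LUT size in bytes minus one; each DMI64
+	 * iteration below writes 8 bytes (two DWORDs), each DMI32 iteration
+	 * writes 4 bytes.
+	 */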
+	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
+		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			cam_io_w_mb(data[1], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
+			data += 2;
+		}
+	} else {
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			data += 1;
+		}
+	}
+	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
+
+	return 0;
+}
+
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag)
+{
+	int ret = 0;
+	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
+	uint32_t used_bytes = 0;
+
+	total_cmd_buf_size = cmd_buf_size;
+
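+	/*
+	 * Walk the stream one command at a time; the command type lives in
+	 * the top byte of the first word. used_bytes is in bytes while
+	 * cmd_buf is a uint32_t pointer, hence the divide-by-4 advance.
+	 */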
+	while (cmd_buf_size > 0) {
+		CDM_CDBG("cmd data=%x\n", *cmd_buf);
+		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
+		switch (cdm_cmd_type) {
+		case CAM_CDM_CMD_REG_CONT: {
+			ret = cam_cdm_util_reg_cont_write(*current_device_base,
+				cmd_buf, cmd_buf_size, &used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes/4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_REG_RANDOM: {
+			ret = cam_cdm_util_reg_random_write(
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_SWD_DMI_32:
+		case CAM_CDM_CMD_SWD_DMI_64: {
+			if (*current_device_base == 0) {
+				pr_err("Got SWD DMI cmd=%d for invalid hw\n",
+					cdm_cmd_type);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE: {
+			struct cdm_changebase_cmd *change_base_cmd =
+				(struct cdm_changebase_cmd *)cmd_buf;
+
+			ret = cam_cdm_get_ioremap_from_base(
+				change_base_cmd->base, base_array_size,
+				base_table, current_device_base);
+			if (ret != 0) {
+				pr_err("Get ioremap change base failed %x\n",
+					change_base_cmd->base);
+				break;
+			}
+			CDM_CDBG("Got ioremap for %x addr=%pK\n",
+				change_base_cmd->base,
+				current_device_base);
+			cmd_buf_size -= (4 *
+				cdm_required_size_changebase());
+			cmd_buf += cdm_required_size_changebase();
+			}
+			break;
+		default:
+			pr_err("unsupported cdm_cmd_type 0x%x\n",
+				cdm_cmd_type);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
new file mode 100644
index 0000000..09d0d63
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+#define CAM_CDM_SW_CMD_COUNT    2
+#define CAM_CMD_LENGTH_MASK     0xFFFF
+#define CAM_CDM_COMMAND_OFFSET  24
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET   8
+#define CAM_CDM_DMI_DATA_LO_OFFSET   12
+
+enum cam_cdm_command {
+	CAM_CDM_CMD_UNUSED = 0x0,
+	CAM_CDM_CMD_DMI = 0x1,
+	CAM_CDM_CMD_NOT_DEFINED = 0x2,
+	CAM_CDM_CMD_REG_CONT = 0x3,
+	CAM_CDM_CMD_REG_RANDOM = 0x4,
+	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+	CAM_CDM_CMD_GEN_IRQ = 0x6,
+	CAM_CDM_CMD_WAIT_EVENT = 0x7,
+	CAM_CDM_CMD_CHANGE_BASE = 0x8,
+	CAM_CDM_CMD_PERF_CTRL = 0x9,
+	CAM_CDM_CMD_DMI_32 = 0xa,
+	CAM_CDM_CMD_DMI_64 = 0xb,
+	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
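+
+/*
+ * The command type occupies the top byte of the first command word
+ * (CAM_CDM_COMMAND_OFFSET); cam_cdm_util_cmd_buf_write() decodes a
+ * command stream by shifting each header word right by this offset.
+ */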
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ *                           in DWORDs.
+ *      @command Command ID
+ *      @return Size of the command in DWORDs
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ *                                    command in dwords.
+ *      @numVals Number of continuous values
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ *                                in dwords.
+ *      @numRegVals  Number of register/value pairs
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ *                              in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ *                                in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
+ *                         command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of address field in the
+ *                              indirect command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_write_regcontinuous: Writes a reg-continuous command into the
+ *                           command buffer.
+ *      @pCmdBuffer:  Pointer to command buffer
+ *      @reg: Beginning of the register address range where
+ *            values will be written.
+ *      @numVals: Number of values (registers) that will be written
+ *      @pVals : An array of values that will be written
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a command into the command buffer in
+ *                       register/value pairs.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @numRegVals: Number of register/value pairs that will be written
+ *      @pRegVals: An array of register/value pairs that will be written
+ *                 The even indices are registers and the odd indices
+ *                 are values, e.g., {reg1, val1, reg2, val2, ...}.
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @dmiCmd: DMI command
+ *      @DMIAddr: Address of the DMI
+ *      @DMISel: Selected bank that the DMI will write to
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes an indirect command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @indirectBufferAddr: Device address of the indirect cmd buffer.
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a change-base command (new CDM device
+ *                        address base) into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @base: New base (device) address
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq: Writes a gen IRQ command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @userdata: userdata or cookie returned by hardware during IRQ
+ */
+struct cam_cdm_utils_ops {
+uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+uint32_t (*cdm_required_size_reg_continuous)(uint32_t  numVals);
+uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+uint32_t (*cdm_required_size_dmi)(void);
+uint32_t (*cdm_required_size_genirq)(void);
+uint32_t (*cdm_required_size_indirect)(void);
+uint32_t (*cdm_required_size_changebase)(void);
+uint32_t (*cdm_offsetof_dmi_addr)(void);
+uint32_t (*cdm_offsetof_indirect_addr)(void);
+uint32_t* (*cdm_write_regcontinuous)(
+	uint32_t *pCmdBuffer,
+	uint32_t reg,
+	uint32_t numVals,
+	uint32_t *pVals);
+uint32_t *(*cdm_write_regrandom)(
+	uint32_t *pCmdBuffer,
+	uint32_t numRegVals,
+	uint32_t *pRegVals);
+uint32_t *(*cdm_write_dmi)(
+	uint32_t *pCmdBuffer,
+	uint8_t  dmiCmd,
+	uint32_t DMIAddr,
+	uint8_t  DMISel,
+	uint32_t dmiBufferAddr,
+	uint32_t length);
+uint32_t *(*cdm_write_indirect)(
+	uint32_t *pCmdBuffer,
+	uint32_t indirectBufferAddr,
+	uint32_t length);
+uint32_t *(*cdm_write_changebase)(
+	uint32_t *pCmdBuffer,
+	uint32_t base);
+void (*cdm_write_genirq)(
+	uint32_t *pCmdBuffer,
+	uint32_t  userdata);
+};
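+
+/*
+ * A HW-version-specific implementation (e.g. CDM170_ops in cam_cdm_util.c)
+ * populates this table so callers can build command buffers without
+ * depending on the underlying command encoding.
+ */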
+
+#endif /* _CAM_CDM_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
new file mode 100644
index 0000000..ed07218
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
new file mode 100644
index 0000000..e34bfc2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
+static void cam_virtual_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		if (payload->irq_status & 0x2) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+		if (payload->irq_status & 0x1) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		kfree(payload);
+	}
+
+}
+
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_cpu_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+				&len);
+		} else if (req->data->type ==
+			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+			rc = 0;
+			vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+		} else {
+			pr_err("Only mem hdl/Kernel va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		if ((!rc) && (vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+				cdm_cmd->cmd[i].len, len);
+			rc = cam_cdm_util_cmd_buf_write(
+				&client->changebase_addr,
+				((uint32_t *)vaddr_ptr +
+					((cdm_cmd->cmd[i].offset)/4)),
+				cdm_cmd->cmd[i].len, client->data.base_array,
+				client->data.base_array_cnt, core->bl_tag);
+			if (rc) {
+				pr_err("write failed for cnt=%d:%d\n",
+					i, req->data->cmd_arrary_count);
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (!rc) {
+			struct cam_cdm_work_payload *payload;
+
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
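+			/*
+			 * The virtual CDM has no HW IRQ; when req->data->flag
+			 * requests a completion callback, emulate the Gen IRQ
+			 * path by queueing a work payload with irq_status
+			 * bit 0x2.
+			 */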
+			if ((true == req->data->flag) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				struct cam_cdm_bl_cb_request_entry *node;
+
+				node = kzalloc(sizeof(
+					struct cam_cdm_bl_cb_request_entry),
+					GFP_KERNEL);
+				if (!node) {
+					rc = -ENOMEM;
+					break;
+				}
+				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+				node->client_hdl = req->handle;
+				node->cookie = req->data->cookie;
+				node->bl_tag = core->bl_tag;
+				node->userdata = req->data->userdata;
+				mutex_lock(&cdm_hw->hw_mutex);
+				list_add_tail(&node->entry,
+					&core->bl_request_list);
+				mutex_unlock(&cdm_hw->hw_mutex);
+
+				payload = kzalloc(sizeof(
+					struct cam_cdm_work_payload),
+					GFP_ATOMIC);
+				if (payload) {
+					payload->irq_status = 0x2;
+					payload->irq_data = core->bl_tag;
+					payload->hw = cdm_hw;
+					INIT_WORK((struct work_struct *)
+						&payload->work,
+						cam_virtual_cdm_work);
+					queue_work(core->work_queue,
+						&payload->work);
+				}
+			}
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL nothing for virtual\n");
+			if (!rc && (core->bl_tag == 63))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	return rc;
+}
+
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	int rc;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+	cdm_hw->soc_info.soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+	if (!cdm_hw->soc_info.soc_private) {
+		rc = -ENOMEM;
+		goto soc_load_failed;
+	}
+
+	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+	if (rc != 0) {
+		pr_err("Failed to load CDM dt private data\n");
+		rc = -1;
+		kfree(cdm_hw->soc_info.soc_private);
+		cdm_hw->soc_info.soc_private = NULL;
+		goto soc_load_failed;
+	}
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+					cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = NULL;
+	cdm_hw_intf->hw_ops.deinit = NULL;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	cdm_hw->open_count = 0;
+	cdm_core->iommu_hdl.non_secure = -1;
+	cdm_core->iommu_hdl.secure = -1;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+	mutex_lock(&cdm_hw->hw_mutex);
+	cdm_core->id = CAM_CDM_VIRTUAL;
+	memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+		sizeof(CAM_CDM_VIRTUAL_NAME));
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	cdm_core->ops = NULL;
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+		CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("Virtual CDM CPAS registration failed\n");
+		goto cpas_registration_failed;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface registration failed\n");
+		goto intf_registration_failed;
+	}
+	CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	return 0;
+intf_registration_failed:
+	cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+	kfree(cdm_hw->soc_info.soc_private);
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return rc;
+}
+
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EBUSY;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+			cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface de-registration failed\n");
+		return rc;
+	}
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	rc = 0;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
new file mode 100644
index 0000000..183b657
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always make sure below register offsets are aligned with
+ * enum cam_cdm_regs offsets
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+	.first_offset = 0x0,
+	.last_offset = 0x200,
+	.reg_count = 0x30,
+	.offsets = cam170_cpas_cdm_register_offsets,
+	.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+		sizeof(struct cam_cdm_reg_offset)),
+};
+
+#endif /* _CAM_HW_CDM170_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index 03b18cf..429474b 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -75,7 +75,26 @@
 static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
 	unsigned int cmd, unsigned long arg)
 {
-	return cam_subdev_ioctl(sd, cmd, compat_ptr(arg));
+	struct cam_control cmd_data;
+	int rc;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+	rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
 }
 #endif
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/Makefile b/drivers/media/platform/msm/camera/cam_cpas/Makefile
new file mode 100644
index 0000000..63dc58e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/cpas_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/camss_top
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
new file mode 100644
index 0000000..4f246e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -0,0 +1,1415 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	int i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CPAS_CDBG("matched %s : %d\n", matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
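+	/*
+	 * masked_value requests a read-modify-write: clear the field
+	 * selected by mask and place the new value at the given shift;
+	 * otherwise the value is written as-is.
+	 */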
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		pr_err("Invalid params %d %d\n", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		pr_err("Invalid vote level=%d, usecases=%d\n", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+
+	if (!bus_client->valid) {
+		pr_err("bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		pr_err("dynamic update not allowed %d %d %d\n",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		pr_err("curr_vote_level %d cannot be greater than 1\n",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
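+	/*
+	 * Dynamic-vote clients register exactly two usecases; ping-pong
+	 * between them so the inactive usecase's vectors can be filled in
+	 * with the new ab/ib values before the vote is switched over.
+	 */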
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+		bus_client->client_id, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
+
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		pr_err("failed to get msm bus scale pdata\n");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		pr_err("Invalid pdata, num_usecases=%d\n", pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		pr_err("failed to register msm bus scale client\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		pr_err("dyn vote expects 2 usecases, found %d\n", pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	mutex_init(&bus_client->lock);
+
+	CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+		bus_client->src, bus_client->dst, bus_client->client_id);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+
+}
+
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		cam_cpas_util_unregister_bus_client(&curr_port->mnoc_bus);
+		of_node_put(curr_port->axi_port_mnoc_node);
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_unregister_bus_client(
+				&curr_port->camnoc_bus);
+			of_node_put(curr_port->axi_port_camnoc_node);
+		}
+		of_node_put(curr_port->axi_port_node);
+		list_del(&curr_port->sibling_port);
+		mutex_destroy(&curr_port->lock);
+		kfree(curr_port);
+	}
+
+	of_node_put(soc_private->axi_port_list_node);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *axi_port;
+	int rc;
+	struct device_node *axi_port_list_node;
+	struct device_node *axi_port_node = NULL;
+	struct device_node *axi_port_mnoc_node = NULL;
+	struct device_node *axi_port_camnoc_node = NULL;
+
+	INIT_LIST_HEAD(&cpas_core->axi_ports_list_head);
+
+	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
+		"qcom,axi-port-list");
+	if (!axi_port_list_node) {
+		pr_err("Node qcom,axi-port-list not found.\n");
+		return -EINVAL;
+	}
+
+	soc_private->axi_port_list_node = axi_port_list_node;
+
+	for_each_available_child_of_node(axi_port_list_node, axi_port_node) {
+		axi_port = kzalloc(sizeof(*axi_port), GFP_KERNEL);
+		if (!axi_port) {
+			rc = -ENOMEM;
+			goto error_previous_axi_cleanup;
+		}
+		axi_port->axi_port_node = axi_port_node;
+
+		rc = of_property_read_string_index(axi_port_node,
+			"qcom,axi-port-name", 0,
+			(const char **)&axi_port->axi_port_name);
+		if (rc) {
+			pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+			goto port_name_fail;
+		}
+
+		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
+			"qcom,axi-port-mnoc");
+		if (!axi_port_mnoc_node) {
+			pr_err("Node qcom,axi-port-mnoc not found.\n");
+			rc = -EINVAL;
+			goto mnoc_node_get_fail;
+		}
+		axi_port->axi_port_mnoc_node = axi_port_mnoc_node;
+
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &axi_port->mnoc_bus);
+		if (rc)
+			goto mnoc_register_fail;
+
+		if (soc_private->axi_camnoc_based) {
+			axi_port_camnoc_node = of_find_node_by_name(
+				axi_port_node, "qcom,axi-port-camnoc");
+			if (!axi_port_camnoc_node) {
+				pr_err("Node qcom,axi-port-camnoc not found\n");
+				rc = -EINVAL;
+				goto camnoc_node_get_fail;
+			}
+			axi_port->axi_port_camnoc_node = axi_port_camnoc_node;
+
+			rc = cam_cpas_util_register_bus_client(soc_info,
+				axi_port_camnoc_node, &axi_port->camnoc_bus);
+			if (rc)
+				goto camnoc_register_fail;
+		}
+
+		mutex_init(&axi_port->lock);
+
+		INIT_LIST_HEAD(&axi_port->sibling_port);
+		list_add_tail(&axi_port->sibling_port,
+			&cpas_core->axi_ports_list_head);
+		INIT_LIST_HEAD(&axi_port->clients_list_head);
+	}
+
+	return 0;
+camnoc_register_fail:
+	of_node_put(axi_port->axi_port_camnoc_node);
+camnoc_node_get_fail:
+	cam_cpas_util_unregister_bus_client(&axi_port->mnoc_bus);
+mnoc_register_fail:
+	of_node_put(axi_port->axi_port_mnoc_node);
+mnoc_node_get_fail:
+port_name_fail:
+	of_node_put(axi_port->axi_port_node);
+	kfree(axi_port);
+error_previous_axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, soc_info);
+	return rc;
+}
+
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+	uint64_t camnoc_bw, mnoc_bw;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		mnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		mnoc_bw = 0;
+		camnoc_bw = 0;
+	}
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
+			mnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+
+		if (soc_private->axi_camnoc_based) {
+			rc = cam_cpas_util_vote_bus_client_bw(
+				&curr_port->camnoc_bus, camnoc_bw, 0);
+			if (rc) {
+				pr_err("Failed in camnoc vote, enable=%d, %d\n",
+					enable, rc);
+				cam_cpas_util_vote_bus_client_bw(
+					&curr_port->mnoc_bus, 0, 0);
+				goto remove_ahb_vote;
+			}
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+static int cam_cpas_util_insert_client_to_axi_port(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, int32_t client_indx)
+{
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		if (strnstr(curr_port->axi_port_name,
+			soc_private->client_axi_port_name[client_indx],
+			strlen(curr_port->axi_port_name))) {
+
+			cpas_client->axi_port = curr_port;
+			INIT_LIST_HEAD(&cpas_client->axi_sibling_client);
+
+			mutex_lock(&curr_port->lock);
+			list_add_tail(&cpas_client->axi_sibling_client,
+				&cpas_client->axi_port->clients_list_head);
+			mutex_unlock(&curr_port->lock);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void cam_cpas_util_remove_client_from_axi_port(
+	struct cam_cpas_client *cpas_client)
+{
+	mutex_lock(&cpas_client->axi_port->lock);
+	list_del(&cpas_client->axi_sibling_client);
+	mutex_unlock(&cpas_client->axi_port->lock);
+}
+
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client %d has not started\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		cam_io_w_mb(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		cam_io_w(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t *value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t reg_value;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!value)
+		return -EINVAL;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client %d has not started\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		reg_value = cam_io_r(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+	*value = reg_value;
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_axi_vote(
+	struct cam_cpas *cpas_core, struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas_client *curr_client;
+	struct cam_cpas_client *temp_client;
+	struct cam_axi_vote req_axi_vote = *axi_vote;
+	struct cam_cpas_axi_port *axi_port = cpas_client->axi_port;
+	uint64_t camnoc_bw = 0, mnoc_bw = 0;
+	int rc = 0;
+
+	if (!axi_port) {
+		pr_err("axi port does not exist\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure we use same bw for both compressed, uncompressed
+	 * in case client has requested either of one only
+	 */
+	if (req_axi_vote.compressed_bw == 0)
+		req_axi_vote.compressed_bw = req_axi_vote.uncompressed_bw;
+
+	if (req_axi_vote.uncompressed_bw == 0)
+		req_axi_vote.uncompressed_bw = req_axi_vote.compressed_bw;
+
+	if ((cpas_client->axi_vote.compressed_bw ==
+		req_axi_vote.compressed_bw) &&
+		(cpas_client->axi_vote.uncompressed_bw ==
+		req_axi_vote.uncompressed_bw))
+		return 0;
+
+	mutex_lock(&axi_port->lock);
+	cpas_client->axi_vote = req_axi_vote;
+
+	list_for_each_entry_safe(curr_client, temp_client,
+		&axi_port->clients_list_head, axi_sibling_client) {
+		camnoc_bw += curr_client->axi_vote.uncompressed_bw;
+		mnoc_bw += curr_client->axi_vote.compressed_bw;
+	}
+
+	if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
+		mnoc_bw = camnoc_bw;
+
+	CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+		axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
+		axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
+		camnoc_bw, mnoc_bw);
+
+	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
+		mnoc_bw, 0);
+	if (rc) {
+		pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+			mnoc_bw, mnoc_bw, rc);
+		goto unlock_axi_port;
+	}
+
+	if (soc_private->axi_camnoc_based) {
+		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
+			camnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+				camnoc_bw, camnoc_bw, rc);
+			goto unlock_axi_port;
+		}
+	}
+
+unlock_axi_port:
+	mutex_unlock(&axi_port->lock);
+	return rc;
+}
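
For reference, the aggregation above sums every sibling client's compressed vote into the MNOC bandwidth and every uncompressed vote into the CAMNOC bandwidth, then lifts the MNOC vote to at least the CAMNOC value when the target is not camnoc-vote based. A minimal standalone sketch of that rule (illustrative only, not part of this patch):

/* Illustrative sketch of the per-port aggregation done in
 * cam_cpas_util_apply_client_axi_vote(); names and types here are
 * simplified placeholders.
 */
struct example_vote {
	unsigned long long compressed_bw;
	unsigned long long uncompressed_bw;
};

static void example_aggregate(const struct example_vote *votes, int num,
	int camnoc_based, unsigned long long *mnoc_bw,
	unsigned long long *camnoc_bw)
{
	int i;

	*mnoc_bw = 0;
	*camnoc_bw = 0;
	for (i = 0; i < num; i++) {
		/* compressed traffic is what MNOC sees */
		*mnoc_bw += votes[i].compressed_bw;
		/* uncompressed traffic is what CAMNOC sees */
		*camnoc_bw += votes[i].uncompressed_bw;
	}

	/* when there is no separate camnoc vote, the single mnoc vote
	 * must cover the larger of the two demands
	 */
	if (!camnoc_based && *mnoc_bw < *camnoc_bw)
		*mnoc_bw = *camnoc_bw;
}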
+
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote, client_handle=%d\n", client_handle);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private,
+		cpas_core->cpas_client[client_indx], axi_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+	enum cam_vote_level required_level;
+	enum cam_vote_level highest_level;
+	int i, rc = 0;
+
+	if (!ahb_bus_client->valid) {
+		pr_err("AHB Bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+		pr_err("Dynamic AHB vote not supported\n");
+		return -EINVAL;
+	}
+
+	required_level = ahb_vote->vote.level;
+
+	if (cpas_client->ahb_level == required_level)
+		return 0;
+
+	mutex_lock(&ahb_bus_client->lock);
+	cpas_client->ahb_level = required_level;
+
+	CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+		required_level, ahb_bus_client->curr_vote_level);
+
+	if (required_level == ahb_bus_client->curr_vote_level)
+		goto unlock_bus_client;
+
+	highest_level = required_level;
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (cpas_core->cpas_client[i] && (highest_level <
+			cpas_core->cpas_client[i]->ahb_level))
+			highest_level = cpas_core->cpas_client[i]->ahb_level;
+	}
+
+	CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+
+	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+		highest_level);
+	if (rc)
+		pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+			highest_level, rc);
+
+unlock_bus_client:
+	mutex_unlock(&ahb_bus_client->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!ahb_vote || (ahb_vote->vote.level == 0)) {
+		pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		ahb_vote->vote.freq,
+		cpas_core->cpas_client[client_indx]->ahb_level);
+
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+		cpas_core->cpas_client[client_indx], ahb_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_start *cmd_hw_start;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+		pr_err("start cmd size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+	ahb_vote = cmd_hw_start->ahb_vote;
+	axi_vote = cmd_hw_start->axi_vote;
+
+	if (!ahb_vote || !axi_vote)
+		return -EINVAL;
+
+	if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+			ahb_vote->vote.level, axi_vote->compressed_bw,
+			axi_vote->uncompressed_bw);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client is not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is in start state\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		cpas_client->ahb_level);
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		ahb_vote);
+	if (rc)
+		goto done;
+
+	CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, axi_vote);
+	if (rc)
+		goto done;
+
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("enable_resource failed, rc=%d\n", rc);
+			goto done;
+		}
+
+		if (cpas_core->internal_ops.power_on_settings) {
+			rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+			if (rc) {
+				cam_cpas_soc_disable_resources(
+					&cpas_hw->soc_info);
+				pr_err("failed in power_on settings rc=%d\n",
+					rc);
+				goto done;
+			}
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	}
+
+	cpas_client->started = true;
+	cpas_core->streamon_clients++;
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc = 0;
+
+	if (!hw_priv || !stop_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+		pr_err("stop cmd size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not started\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+	cpas_client->started = false;
+	cpas_core->streamon_clients--;
+
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("disable_resource failed, rc=%d\n", rc);
+			goto done;
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		&ahb_vote);
+	if (rc)
+		goto done;
+
+	axi_vote.uncompressed_bw = 0;
+	axi_vote.compressed_bw = 0;
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, &axi_vote);
+
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	int rc = 0;
+
+	if (!hw_priv || !init_hw_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("INIT HW size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_core->internal_ops.init_hw_version) {
+		rc = cpas_core->internal_ops.init_hw_version(cpas_hw,
+			(struct cam_cpas_hw_caps *)init_hw_args);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+	struct cam_cpas_client *cpas_client;
+	char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+	int32_t client_indx = -1;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+		register_params->identifier, register_params->cell_index);
+
+	if (soc_private->client_id_based)
+		snprintf(client_name, sizeof(client_name), "%s%d",
+			register_params->identifier,
+			register_params->cell_index);
+	else
+		snprintf(client_name, sizeof(client_name), "%s",
+			register_params->identifier);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+
+	rc = cam_cpas_util_get_string_index(soc_private->client_name,
+		soc_private->num_clients, client_name, &client_indx);
+	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
+		CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("Invalid Client register : %s %d, %d\n",
+			register_params->identifier,
+			register_params->cell_index, client_indx);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	cpas_client = kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+	if (!cpas_client) {
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
+		cpas_client, client_indx);
+	if (rc) {
+		pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+			client_indx, rc);
+		kfree(cpas_client);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	register_params->client_handle =
+		CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+	memcpy(&cpas_client->data, register_params,
+		sizeof(struct cam_cpas_register_params));
+	cpas_core->cpas_client[client_indx] = cpas_client;
+	cpas_core->registered_clients++;
+
+	mutex_unlock(&cpas_hw->hw_mutex);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	return 0;
+}
+
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not stopped\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cam_cpas_util_remove_client_from_axi_port(
+		cpas_core->cpas_client[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	kfree(cpas_core->cpas_client[client_indx]);
+	cpas_core->cpas_client[client_indx] = NULL;
+	cpas_core->registered_clients--;
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_cpas_hw_caps *hw_caps;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("HW_CAPS size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+	*hw_caps = cpas_core->hw_caps;
+
+	return 0;
+}
+
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+		pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
+			cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+		struct cam_cpas_register_params *register_params;
+
+		if (sizeof(struct cam_cpas_register_params) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		register_params = (struct cam_cpas_register_params *)cmd_args;
+		rc = cam_cpas_hw_register_client(hw_priv, register_params);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+		uint32_t *client_handle;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		client_handle = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_WRITE: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_write =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+			reg_write->reg_base, reg_write->offset, reg_write->mb,
+			reg_write->value);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_READ: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_read =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_read(hw_priv,
+			reg_read->client_handle, reg_read->reg_base,
+			reg_read->offset, reg_read->mb, &reg_read->value);
+
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AHB_VOTE: {
+		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+		rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+			cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AXI_VOTE: {
+		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+		rc = cam_cpas_hw_update_axi_vote(hw_priv,
+			cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+		break;
+	}
+	default:
+		pr_err("CPAS HW command not valid =%d\n", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		mutex_init(&cpas_core->client_mutex[i]);
+		cpas_core->cpas_client[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i]) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+			cpas_core->cpas_client[i] = NULL;
+		}
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		pr_err("failed to get arch-compat rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		pr_err("arch-compat %s not supported\n", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc != 0)
+		goto release_mem;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_mem;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		pr_err("failed in client setup, rc=%d\n", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		pr_err("failed in ahb setup, rc=%d\n", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in axi setup, rc=%d\n", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			pr_err("failed in get_hw_info, rc=%d\n", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		pr_err("Invalid get_hw_info\n");
+		rc = -EINVAL;
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	pr_err("failed in hw probe\n");
+	return rc;
+}
+
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		pr_err("cpas interface not initialized\n");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		pr_err("cpas hw is in power up state\n");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 0000000..c181302
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CPAS_MAX_CLIENTS 20
+
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+#define CAM_CPAS_CLIENT_VALID(indx) ((indx >= 0) && (indx < CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)        \
+	((CAM_CPAS_CLIENT_VALID(indx)) && \
+	(cpas_core->cpas_client[indx]))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx)          \
+	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
+	(cpas_core->cpas_client[indx]->started))
+
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+	CAM_REG_TYPE_READ,
+	CAM_REG_TYPE_WRITE,
+	CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for get hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for irq handling
+ * @setup_regbase: Function pointer to set up regbase indices
+ * @power_on_settings: Function pointer for hw core specific power on settings
+ *
+ */
+struct cam_cpas_internal_ops {
+	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	irqreturn_t (*handle_irq)(int irq_num, void *data);
+	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+		int32_t regbase_index[], int32_t num_reg_map);
+	int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+};
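
The probe path later selects one of these tables from the "arch-compat" DT string (camss_top vs cpas_top). Purely as a hedged sketch of what a target-specific provider could look like — the function and handler names below are placeholders invented for illustration, not part of this patch, and the snippet assumes the kernel/CPAS headers already pulled in by this file:

/* Hypothetical example: not from this patch */
static int example_get_hw_info(struct cam_hw_info *cpas_hw,
	struct cam_cpas_hw_caps *hw_caps)
{
	/* a real implementation would read version registers here */
	return 0;
}

int example_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
	if (!internal_ops)
		return -EINVAL;

	internal_ops->get_hw_info = example_get_hw_info;
	/* optional hooks may legitimately stay NULL */
	internal_ops->init_hw_version = NULL;
	internal_ops->handle_irq = NULL;
	internal_ops->setup_regbase = NULL;
	internal_ops->power_on_settings = NULL;

	return 0;
}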
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info needs to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
+ * @offset: Register offset
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+	bool enable;
+	enum cam_cpas_access_type access_type;
+	bool masked_value;
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @started: Whether client has streamed on
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @axi_sibling_client: Client's siblings sharing the same axi port
+ *
+ */
+struct cam_cpas_client {
+	struct cam_cpas_register_params data;
+	bool started;
+	enum cam_vote_level ahb_level;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_axi_port *axi_port;
+	struct list_head axi_sibling_client;
+};
+
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ *
+ */
+struct cam_cpas_bus_client {
+	int src;
+	int dst;
+	struct msm_bus_scale_pdata *pdata;
+	uint32_t client_id;
+	int num_usecases;
+	int num_paths;
+	unsigned int curr_vote_level;
+	bool dyn_vote;
+	struct mutex lock;
+	bool valid;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @sibling_port: Sibling AXI ports
+ * @clients_list_head: List head pointing to list of clients sharing this port
+ * @lock: Mutex lock for accessing this port
+ * @camnoc_bus: CAMNOC bus client info for this port
+ * @mnoc_bus: MNOC bus client info for this port
+ * @axi_port_name: Name of this AXI port
+ * @axi_port_node: Node representing this AXI Port
+ * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
+ * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
+ *
+ */
+struct cam_cpas_axi_port {
+	struct list_head sibling_port;
+	struct list_head clients_list_head;
+	struct mutex lock;
+	struct cam_cpas_bus_client camnoc_bus;
+	struct cam_cpas_bus_client mnoc_bus;
+	const char *axi_port_name;
+	struct device_node *axi_port_node;
+	struct device_node *axi_port_mnoc_node;
+	struct device_node *axi_port_camnoc_node;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @num_clients: Total number of clients that CPAS supports
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_ports_list_head: Head pointing to list of AXI ports
+ * @internal_ops: CPAS HW internal ops
+ *
+ */
+struct cam_cpas {
+	struct cam_cpas_hw_caps hw_caps;
+	struct cam_cpas_client *cpas_client[CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CPAS_MAX_CLIENTS];
+	uint32_t num_clients;
+	uint32_t registered_clients;
+	uint32_t streamon_clients;
+	int32_t regbase_index[CAM_CPAS_REG_MAX];
+	struct cam_cpas_bus_client ahb_bus_client;
+	struct list_head axi_ports_list_head;
+	struct cam_cpas_internal_ops internal_ops;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
new file mode 100644
index 0000000..d2c3e06
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -0,0 +1,137 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+
+#ifdef CONFIG_CAM_CPAS_DBG
+#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#undef pr_fmt
+#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
+
+#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> (shift))
+
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+	CAM_HW_CPASTOP,
+	CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_REG_WRITE,
+	CAM_CPAS_HW_CMD_REG_READ,
+	CAM_CPAS_HW_CMD_AHB_VOTE,
+	CAM_CPAS_HW_CMD_AXI_VOTE,
+	CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+	uint32_t client_handle;
+	enum cam_cpas_reg_base reg_base;
+	uint32_t offset;
+	uint32_t value;
+	bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+	uint32_t client_handle;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB vote info to apply at start
+ * @axi_vote: AXI bandwidth vote to apply at start
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+	uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+	uint32_t camera_family;
+	struct cam_hw_version camera_version;
+	struct cam_hw_version cpas_version;
+	uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
new file mode 100644
index 0000000..fdebdc7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -0,0 +1,605 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME    "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe is completed
+ *
+ */
+struct cam_cpas_intf {
+	struct platform_device *pdev;
+	struct cam_subdev subdev;
+	struct cam_hw_intf *hw_intf;
+	struct cam_cpas_hw_caps hw_caps;
+	struct mutex intf_lock;
+	uint32_t open_cnt;
+	bool probe_done;
+};
+
+static struct cam_cpas_intf *g_cpas_intf;
+
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version) {
+		pr_err("invalid input %pK %pK\n", camera_family,
+			camera_version);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		pr_err("Invalid arg value\n");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
+
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			pr_err("Failed in stop, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid stop ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			pr_err("Failed in start, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid start ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
+
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
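
Taken together, these exports are the CPAS client contract: register once, bracket hardware access with start/stop (each start carries initial AHB/AXI votes), adjust votes while streaming, then unregister. A hedged usage sketch follows; the client name "ife", the bandwidth numbers, and the assumption that cam_cpas_register_params exposes identifier/cell_index/client_handle exactly as the hw layer in this patch uses them are illustrative, not normative:

/* Hypothetical CPAS client flow, for illustration only */
static int example_use_cpas(void)
{
	struct cam_cpas_register_params params = {0};
	struct cam_ahb_vote ahb_vote = {0};
	struct cam_axi_vote axi_vote = {0};
	int rc;

	/* identifier is assumed to be a fixed-size string matching one
	 * of the DT "client-names" entries; "ife" is a placeholder
	 */
	strlcpy(params.identifier, "ife", sizeof(params.identifier));
	params.cell_index = 0;

	rc = cam_cpas_register_client(&params);
	if (rc)
		return rc;

	ahb_vote.type = CAM_VOTE_ABSOLUTE;
	ahb_vote.vote.level = CAM_SVS_VOTE;
	axi_vote.compressed_bw = 150000000;	/* illustrative numbers */
	axi_vote.uncompressed_bw = 300000000;

	rc = cam_cpas_start(params.client_handle, &ahb_vote, &axi_vote);
	if (rc)
		goto unregister;

	/* ... access camera HW; cam_cpas_update_axi_vote() as load changes ... */

	cam_cpas_stop(params.client_handle);
unregister:
	cam_cpas_unregister_client(params.client_handle);
	return rc;
}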
+
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc;
+
+	if (!cmd) {
+		pr_err("Invalid input cmd\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		if (copy_from_user(&query, (void __user *) cmd->handle,
+			sizeof(query))) {
+			pr_err("Failed in copy from user\n");
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version);
+		if (rc)
+			break;
+
+		if (copy_to_user((void __user *) cmd->handle, &query,
+			sizeof(query))) {
+			pr_err("Failed in copy to user\n");
+			rc = -EFAULT;
+		}
+
+		break;
+	}
+	default:
+		pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+	.ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+	.core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+	.open = cam_cpas_subdev_open,
+	.close = cam_cpas_subdev_close,
+};
+
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		pr_err("cpas dev probe already done\n");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		pr_err("Failed in hw probe, rc=%d\n", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		pr_err("Invalid get_hw_caps ops\n");
+		rc = -EINVAL;
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	pr_err("CPAS probe failed\n");
+	return rc;
+}
+
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+	{.compatible = "qcom,cam-cpas"},
+	{}
+};
+
+static struct platform_driver cam_cpas_driver = {
+	.probe = cam_cpas_dev_probe,
+	.remove = cam_cpas_dev_remove,
+	.driver = {
+		.name = CAM_CPAS_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cpas_dt_match,
+	},
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+	return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
new file mode 100644
index 0000000..0a8e6bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
+	struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_private || !pdev) {
+		pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&soc_private->arch_compat);
+	if (rc) {
+		pr_err("device %s failed to read arch-compat\n", pdev->name);
+		return rc;
+	}
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		pr_err("no client-names found\n");
+		count = 0;
+		return -EINVAL;
+	}
+	soc_private->num_clients = count;
+	CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			pr_err("no client-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+	}
+
+	count = of_property_count_strings(of_node, "client-axi-port-names");
+	if ((count <= 0) || (count != soc_private->num_clients)) {
+		pr_err("incorrect client-axi-port-names info %d %d\n",
+			count, soc_private->num_clients);
+		count = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-axi-port-names", i,
+			&soc_private->client_axi_port_name[i]);
+		if (rc) {
+			pr_err("no client-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+			soc_private->client_axi_port_name[i]);
+	}
+
+	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
+		"client-bus-camnoc-based");
+
+	return 0;
+}
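+
+/*
+ * A minimal sketch of the devicetree layout this parser expects; the node
+ * name, unit address and client strings below are illustrative assumptions,
+ * not values taken from an actual target devicetree:
+ *
+ *	qcom,cam-cpas@ac40000 {
+ *		compatible = "qcom,cam-cpas";
+ *		arch-compat = "cpas_top";
+ *		client-id-based;
+ *		client-names = "ife0", "ife1";
+ *		client-axi-port-names = "cam_hf_1", "cam_hf_2";
+ *		client-bus-camnoc-based;
+ *	};
+ */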
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		pr_err("failed in get_dt_properties, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		pr_err("Invalid IRQ handler\n");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		irq_data);
+	if (rc) {
+		pr_err("failed in request_platform_resource, rc=%d\n", rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
+	if (rc) {
+		pr_err("failed in get_custom_info, rc=%d\n", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		pr_err("release platform failed, rc=%d\n", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("enable platform resource failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("disable platform failed, rc=%d\n", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
new file mode 100644
index 0000000..fdd9386
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+
+#define CAM_CPAS_MAX_CLIENTS 20
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @axi_camnoc_based: Whether AXI access is camnoc based
+ * @client_axi_port_name: AXI Port name for each client
+ * @axi_port_list_node : Node representing AXI Ports list
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	bool axi_camnoc_based;
+	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
+	struct device_node *axi_port_list_node;
+};
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
new file mode 100644
index 0000000..bce10cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
new file mode 100644
index 0000000..fa8ab89
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	CPAS_CDBG("Family %d, version %d.%d.%d\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
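+
+/*
+ * Worked example of the version decode above, assuming BITS_MASK_SHIFT(v,
+ * mask, shift) expands to ((v) & (mask)) >> (shift) and assuming an
+ * illustrative register value of 0x10070000:
+ *	major = (0x10070000 & 0xf0000000) >> 0x1c = 1
+ *	minor = (0x10070000 & 0x0fff0000) >> 0x10 = 7
+ *	incr  = (0x10070000 & 0x0000ffff) >> 0x0  = 0
+ * i.e. the camera version is reported as 1.7.0.
+ */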
+
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on_settings = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
new file mode 100644
index 0000000..820a0df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
new file mode 100644
index 0000000..415de47
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -0,0 +1,301 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+
+struct cam_camnoc_info *camnoc_info;
+
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return 0;
+}
+
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	uint32_t reg_value;
+	int i;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	for (i = 0; i < camnoc_info->error_logger_size; i++) {
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i)
+{
+	uint32_t reg_value;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+{
+	pr_err("ahb timout error\n");
+
+	return 0;
+}
+
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	uint32_t irq_status;
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+
+	irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			pr_err("Error occurred, type=%d\n", irq_type);
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(cpas_core,
+					soc_info);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_err(cpas_core,
+					soc_info, i);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CPAS_CDBG("TEST IRQ\n");
+				break;
+			default:
+				break;
+			}
+
+			irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+
+	if (irq_status)
+		pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	return IRQ_HANDLED;
+}
+
+static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0)) {
+			camnoc_info = &cam170_cpas100_camnoc_info;
+		} else {
+			pr_err("CPAS Version not supported %d.%d.%d\n",
+				hw_caps->cpas_version.major,
+				hw_caps->cpas_version.minor,
+				hw_caps->cpas_version.incr);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Camera Version not supported %d.%d.%d\n",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on_settings = cam_cpastop_static_settings;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
new file mode 100644
index 0000000..99aae3f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc hw irq types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                                 1 QHB port) has an error logger. The error
+ *                                 observed at any slave port is logged into
+ *                                 the error logger register and an IRQ is
+ *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE0 UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE1 or IFE3
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS
+ *                                               UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP
+ *                                               slave  times out after 4000
+ *                                               AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST              : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+	CAM_CAMNOC_HW_IRQ_RESERVED1,
+	CAM_CAMNOC_HW_IRQ_RESERVED2,
+	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ *         settings like QoS, LUT mappings need to be configured for
+ *         each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+	CAM_CAMNOC_CDM,
+	CAM_CAMNOC_IFE02,
+	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IPE_BPS_LRME_READ,
+	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+	CAM_CAMNOC_JPEG,
+	CAM_CAMNOC_FD,
+	CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+	enum cam_camnoc_port_type port_type;
+	bool enable;
+	struct cam_cpas_reg priority_lut_low;
+	struct cam_cpas_reg priority_lut_high;
+	struct cam_cpas_reg urgency;
+	struct cam_cpas_reg danger_lut;
+	struct cam_cpas_reg safe_lut;
+	struct cam_cpas_reg ubwc_ctl;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+	struct cam_cpas_reg sbm_enable;
+	struct cam_cpas_reg sbm_status;
+	struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+	enum cam_camnoc_hw_irq_type irq_type;
+	bool enable;
+	uint32_t sbm_port;
+	struct cam_cpas_reg err_enable;
+	struct cam_cpas_reg err_status;
+	struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to CAMNOC port-specific settings
+ * @specific_size: Array size of port-specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @error_logger_size: Array size of IRQ Error logger
+ *
+ */
+struct cam_camnoc_info {
+	struct cam_camnoc_specific *specific;
+	int specific_size;
+	struct cam_camnoc_irq_sbm *irq_sbm;
+	struct cam_camnoc_irq_err *irq_err;
+	int irq_err_size;
+	uint32_t *error_logger;
+	int error_logger_size;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
new file mode 100644
index 0000000..8686bd5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -0,0 +1,532 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x7 : 0x3,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x7,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x7,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = false,
+	}
+};
+
+static uint32_t slave_error_logger[] = {
+	0x2700, /* ERRLOGGER_SWID_LOW */
+	0x2704, /* ERRLOGGER_SWID_HIGH */
+	0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.error_logger = &slave_error_logger[0],
+	.error_logger_size = sizeof(slave_error_logger) /
+		sizeof(slave_error_logger[0]),
+};
+
+#endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
new file mode 100644
index 0000000..f6b0729
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -0,0 +1,324 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ *                          are the identifiers used in generic register
+ *                          write/read APIs provided by cpas driver.
+ */
+enum cam_cpas_reg_base {
+	CAM_CPAS_REG_CPASTOP,
+	CAM_CPAS_REG_CAMNOC,
+	CAM_CPAS_REG_CAMSS,
+	CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                              1 QHB port) has an error logger. The error
+ *                              observed at any slave port is logged into
+ *                              the error logger register and an IRQ is
+ *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE1 or IFE3 UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC decoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP slave
+ *                                            times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier        : Input identifier string which is the device label
+ *                      from dt like vfe, ife, jpeg etc
+ * @cell_index        : Input integer identifier pointing to the cell index
+ *                      from dt of the device. This can be used to form a
+ *                      unique string with @identifier like vfe0, ife1,
+ *                      jpeg0, etc
+ * @dev               : device handle
+ * @userdata          : Input private data which will be passed as
+ *                      an argument while callback.
+ * @cam_cpas_client_cb: Input callback pointer for triggering the
+ *                      callbacks from CPAS driver.
+ *                      @client_handle : CPAS client handle
+ *                      @userdata    : User data given at the time of register
+ *                      @event_type  : event type
+ *                      @event_data  : event data
+ * @client_handle       : Output unique handle generated for this registration
+ *
+ */
+struct cam_cpas_register_params {
+	char            identifier[CAM_HW_IDENTIFIER_LENGTH];
+	uint32_t        cell_index;
+	struct device  *dev;
+	void           *userdata;
+	void          (*cam_cpas_client_cb)(
+			int32_t                   client_handle,
+			void                     *userdata,
+			enum cam_camnoc_irq_type  event_type,
+			uint32_t                  event_data);
+	uint32_t        client_handle;
+};
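+
+/*
+ * A minimal sketch of filling these params for a hypothetical "ife" client;
+ * my_camnoc_irq_cb, my_ctx and pdev are assumptions used for illustration:
+ *
+ *	struct cam_cpas_register_params params = {0};
+ *
+ *	strlcpy(params.identifier, "ife", CAM_HW_IDENTIFIER_LENGTH);
+ *	params.cell_index = 0;
+ *	params.dev = &pdev->dev;
+ *	params.userdata = my_ctx;
+ *	params.cam_cpas_client_cb = my_camnoc_irq_cb;
+ *	rc = cam_cpas_register_client(&params);
+ */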
+
+/**
+ * enum cam_vote_type - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC  : Dynamic vote
+ */
+enum cam_vote_type {
+	CAM_VOTE_ABSOLUTE,
+	CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_SVS_VOTE     : SVS vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE   : Turbo vote
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_SVS_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_TURBO_VOTE,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type  : AHB voting type.
+ *          CAM_VOTE_ABSOLUTE : vote based on the 'level' value that is set
+ *          CAM_VOTE_DYNAMIC  : vote calculated dynamically from the 'freq'
+ *                              and 'dev' handle that are set
+ * @level : AHB vote level
+ * @freq  : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+	enum cam_vote_type   type;
+	union {
+		enum cam_vote_level  level;
+		unsigned long        freq;
+	} vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data
+ *                    This is the required bandwidth for uncompressed
+ *                    data traffic between hw core and camnoc.
+ * @compressed_bw   : Bus bandwidth required in Bytes for compressed data.
+ *                    This is the required bandwidth for compressed
+ *                    data traffic between camnoc and mmnoc.
+ *
+ * If one of the above is not applicable to a hw client, it should
+ * fill the same value in both fields.
+ *
+ */
+struct cam_axi_vote {
+	uint64_t   uncompressed_bw;
+	uint64_t   compressed_bw;
+};
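+
+/*
+ * A minimal sketch of filling the vote structures before cam_cpas_start();
+ * the level and bandwidth values are illustrative assumptions, not
+ * recommended settings:
+ *
+ *	struct cam_ahb_vote ahb_vote;
+ *	struct cam_axi_vote axi_vote;
+ *
+ *	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ *	ahb_vote.vote.level = CAM_SVS_VOTE;
+ *	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ *	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ */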
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params);
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients have to vote for their
+ *     minimal AHB and AXI bandwidth requirements here. Use
+ *     cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote to scale
+ *     bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote     : Pointer to ahb vote info
+ * @axi_vote     : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote,
+	struct cam_axi_vote   *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
+ *     would be removed for this client on this call. Clients should not
+ *     use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ *     to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop when a client wants to
+ *     scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote      : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop when a client wants to
+ *     scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote      : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+	uint32_t             client_handle,
+	struct cam_axi_vote *axi_vote);
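+
+/*
+ * A minimal usage sketch of the client lifecycle with the APIs above,
+ * assuming 'params', 'ahb_vote' and 'axi_vote' were filled as in the
+ * earlier sketches; error handling is omitted for brevity:
+ *
+ *	rc = cam_cpas_register_client(&params);
+ *	rc = cam_cpas_start(params.client_handle, &ahb_vote, &axi_vote);
+ *	...
+ *	rc = cam_cpas_update_axi_vote(params.client_handle, &axi_vote);
+ *	...
+ *	rc = cam_cpas_stop(params.client_handle);
+ *	rc = cam_cpas_unregister_client(params.client_handle);
+ */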
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg write with memory barrier
+ * @value         : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                  value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg read with memory barrier
+ * @value         : Value read from the register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                 *value);
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family  : Camera family type. One of
+ *                   CAM_FAMILY_CAMERA_SS
+ *                   CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+	uint32_t                 *camera_family,
+	struct cam_hw_version    *camera_version);
+
+#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
new file mode 100644
index 0000000..b6e2d09
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
new file mode 100644
index 0000000..c304eed
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -0,0 +1,1299 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_isp_context.h"
+#include "cam_isp_log.h"
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int __cam_isp_ctx_handle_buf_done_in_activated_state(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_ctx_request  *req;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->active_req_list)) {
+		CDBG("Buf done with no active request!\n");
+		goto end;
+	}
+
+	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+
+	req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
+			if (done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle)
+			break;
+		}
+
+		if (j == req_isp->num_fence_map_out) {
+			pr_err("Can not find matching lane handle 0x%x!\n",
+				done->resource_handle[i]);
+			rc = -EINVAL;
+			continue;
+		}
+
+		if (!bubble_state) {
+			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+		} else if (!req_isp->bubble_report) {
+			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR);
+		} else {
+			/*
+			 * Ignore the buffer done if bubble detect is on.
+			 * In most cases, the active list should be empty when
+			 * a bubble is detected. But for safety, we just move
+			 * the current active request to the pending list here.
+			 */
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			continue;
+		}
+
+		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+			   req->request_id,
+			   req_isp->fence_map_out[j].sync_id);
+		req_isp->num_acked++;
+		req_isp->fence_map_out[j].sync_id = -1;
+	}
+
+	if (req_isp->num_acked == req_isp->num_fence_map_out) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("Reg upd ack with no pending request\n");
+		goto end;
+	}
+	req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	list_del_init(&req->list);
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp->num_fence_map_out != 0) {
+		CDBG("%s: move request %lld to active list\n", __func__,
+			req->request_id);
+		if (!list_empty(&ctx->active_req_list))
+			pr_err("%s: More than one entry in active list\n",
+				__func__);
+		list_add_tail(&req->list, &ctx->active_req_list);
+	} else {
+		/* no io config, so the request is completed. */
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/*
+	 * This function is only called directly from the applied and
+	 * bubble-applied states, so change the substate here.
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_notify_sof_in_actived_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_req_mgr_sof_notify  notify;
+	struct cam_context *ctx = ctx_isp->base;
+
+	/* notify reqmgr with sof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify SOF to CRM\n", __func__);
+	}
+
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	CDBG("%s: Enter\n", __func__);
+	ctx_isp->frame_id++;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (ctx->state != CAM_CTX_ACTIVATED) {
+		CDBG("%s: invalid RUP\n", __func__);
+		goto end;
+	}
+
+	/*
+	 * This is for the first update. The initial setting will
+	 * cause the reg_upd in the first frame.
+	 */
+	if (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (req_isp->num_fence_map_out == req_isp->num_acked)
+			list_add_tail(&req->list, &ctx->free_req_list);
+		else {
+			/* need to handle the buf done */
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->substate_activated =
+				CAM_ISP_CTX_ACTIVATED_EPOCH;
+		}
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * The recovery is to go back to sof state
+		 */
+		pr_err("%s: No pending request\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		pr_err("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+end:
+	return rc;
+}
+
+
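+/*
+ * The buf done handlers below all funnel into
+ * __cam_isp_ctx_handle_buf_done_in_activated_state(); the trailing flag
+ * appears to indicate whether the buf done is handled while in a bubble
+ * (0 for the applied/epoch substates, 1 for the bubble substates).
+ */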
+static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	ctx_isp->frame_id++;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	pr_err("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+
+static int __cam_isp_ctx_epoch_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	/*
+	 * This means we missed the reg upd ack. So we need to
+	 * transition to BUBBLE state again.
+	 */
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * Just go back to the bubble state.
+		 */
+		pr_err("%s: No pending request.\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int                              rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_req_mgr_error_notify  notify;
+
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_error_event_data  *error_event_data =
+			(struct cam_isp_hw_error_event_data *)evt_data;
+
+	uint32_t error_type = error_event_data->error_type;
+
+	CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
+		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
+		notify.error = CRM_KMD_ERR_FATAL;
+
+	/*
+	 * Need to check the active requests and move all of them to the
+	 * pending request list.
+	 * Note: this function needs to be revisited!
+	 */
+
+	if (list_empty(&ctx->active_req_list)) {
+		pr_err("handling error with no active request!\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+
+		ctx->ctx_crm_intf->notify_err(&notify);
+		pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify ERROR to CRM\n", __func__);
+		rc = -EFAULT;
+	}
+
+	list_del_init(&req->list);
+	list_add(&req->list, &ctx->pending_req_list);
+	/* might need to check if active list is empty */
+
+end:
+	CDBG("%s: Exit\n", __func__);
+	return rc;
+}
+
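+/*
+ * IRQ dispatch table for the activated state. The outer index is the
+ * activated substate (SOF, APPLIED, EPOCH, BUBBLE, BUBBLE_APPLIED, HALT)
+ * and each irq_ops[] slot corresponds to an entry of
+ * enum cam_isp_hw_event_type (ERROR, SOF, REG_UPDATE, EPOCH, EOF, DONE).
+ * A NULL slot means the event is ignored in that substate.
+ */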
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_sof,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_epoch,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* Bubble Applied */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble_applied,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_bubble_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HALT */
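+	/*
+	 * Empty entry: every irq_ops slot stays NULL, so all hardware
+	 * events are dropped once the context has been halted.
+	 */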
+	{
+	},
+};
+
+static int __cam_isp_ctx_apply_req_in_activated_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+	uint32_t next_state)
+{
+	int rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp;
+	struct cam_hw_config_args        cfg;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("%s: No available request for Apply id %lld\n",
+			__func__, apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+
+	/*
+	 * Check whether the request id matches the tip; if not, it means we
+	 * are in the middle of error handling and need to reject this apply.
+	 */
+	if (req->request_id != apply->request_id) {
+		rc = -EFAULT;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+
+	req_isp->bubble_report = apply->report_if_bubble;
+
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.num_hw_update_entries = req_isp->num_cfg;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		pr_err("%s: Can not apply the configuration\n", __func__);
+	} else {
+		spin_lock(&ctx->lock);
+		ctx_isp->substate_activated = next_state;
+		CDBG("%s: new state %d\n", __func__, next_state);
+		spin_unlock(&ctx->lock);
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_sof(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_epoch(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_bubble(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
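+/*
+ * CRM-facing substate machine for the activated state, indexed by
+ * enum cam_isp_ctx_activated_substate. Only the SOF, EPOCH and BUBBLE
+ * substates accept apply_req; APPLIED, BUBBLE_APPLIED and HALT do not.
+ */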
+static struct cam_ctx_ops
+	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_sof,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
+		},
+		.irq_ops = NULL,
+	},
+	/* Bubble Applied */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+
+/* top level state machine */
+static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_release_args       rel_arg;
+	struct cam_ctx_request	        *req;
+	struct cam_isp_ctx_req	        *req_isp;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->crm_ctx_intf = NULL;
+	ctx_isp->frame_id = 0;
+
+	/*
+	 * Ideally, we should never have any active request here.
+	 * But we still add some sanity check code here to help debugging.
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		pr_err("%s: Active list is not empty.\n", __func__);
+
+	/* flush the pending list */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		pr_err("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_top_state(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request           *req = NULL;
+	struct cam_isp_ctx_req           *req_isp;
+	uint64_t                          packet_addr;
+	struct cam_packet                *packet;
+	size_t                            len = 0;
+	struct cam_hw_prepare_update_args cfg;
+	struct cam_req_mgr_add_request    add_req;
+	struct cam_isp_context           *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: get free request object......\n", __func__);
+
+	/* get free request */
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		pr_err("%s: No more request obj free\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		(uint64_t *) &packet_addr, &len);
+	if (rc != 0) {
+		pr_err("%s: Can not get packet address\n", __func__);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
+	CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
+	CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+		len, cmd->offset);
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+	CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
+	CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.out_map_entries = req_isp->fence_map_out;
+	cfg.in_map_entries = req_isp->fence_map_in;
+
+	CDBG("%s: try to prepare config packet......\n", __func__);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		pr_err("%s: Prepare config packet failed in HW layer\n",
+			__func__);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req_isp->num_cfg = cfg.num_hw_update_entries;
+	req_isp->num_fence_map_out = cfg.num_out_map_entries;
+	req_isp->num_fence_map_in = cfg.num_in_map_entries;
+	req_isp->num_acked = 0;
+
+	CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
+		__func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_fence_map_in);
+
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+
+	if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+		add_req.link_hdl = ctx->link_hdl;
+		add_req.dev_hdl  = ctx->dev_hdl;
+		add_req.req_id   = req->request_id;
+		rc = ctx->ctx_crm_intf->add_req(&add_req);
+		if (rc) {
+			pr_err("%s: Error: Adding request id=%llu\n", __func__,
+				req->request_id);
+			goto free_req;
+		}
+	}
+
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->pending_req_list);
+	spin_unlock(&ctx->lock);
+
+	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+		req->request_id);
+
+	return rc;
+
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock(&ctx->lock);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_acquire_args       param;
+	struct cam_isp_resource         *isp_res = NULL;
+	struct cam_create_dev_hdl        req_hdl_param;
+	struct cam_hw_release_args       release;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready!\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
+		 __func__, cmd->session_handle, cmd->num_resources,
+		cmd->handle_type, cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
+		pr_err("Too many resources in the acquire!\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		pr_err("%s: Only user pointer is supported!", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	isp_res = kzalloc(
+		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
+	if (!isp_res) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CDBG("%s: start copy %d resources from user\n",
+		__func__, cmd->num_resources);
+
+	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
+		sizeof(*isp_res)*cmd->num_resources)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = (uint64_t) isp_res;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		pr_err("Acquire device failed\n");
+		goto free_res;
+	}
+
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+	req_hdl_param.priv = ctx;
+
+	CDBG("%s: get device handle from bridge\n", __func__);
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		pr_err("Can not create device handle\n");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+	kfree(isp_res);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx_isp->hw_ctx = NULL;
+free_res:
+	kfree(isp_res);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+
+	if (!rc && ctx->link_hdl)
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc = 0;
+
+	CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+
+	ctx->link_hdl = link->link_hdl;
+	ctx->ctx_crm_intf = link->crm_cb;
+
+	/* change state only if we had the init config */
+	if (!list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *dev_info)
+{
+	int rc = 0;
+
+	dev_info->dev_hdl = ctx->dev_hdl;
+	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
+	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
+	dev_info->p_delay = 1;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args         arg;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (cmd->session_handle != ctx->session_hdl ||
+		cmd->dev_handle != ctx->dev_hdl) {
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* should never happen */
+		pr_err("%s: Start device with empty configuration\n",
+			__func__);
+		rc = -EFAULT;
+		goto end;
+	} else {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	}
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (!ctx_isp->hw_ctx) {
+		pr_err("%s:%d: Wrong hw context pointer.\n",
+			__func__, __LINE__);
+		rc = -EFAULT;
+		goto end;
+	}
+	arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	arg.hw_update_entries = req_isp->cfg;
+	arg.num_hw_update_entries = req_isp->num_cfg;
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	/*
+	 * This is the only place the state is changed before calling the hw,
+	 * because the hardware tasklet has higher priority and can cause the
+	 * irq handling to come in early.
+	 */
+	ctx->state = CAM_CTX_ACTIVATED;
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	if (rc) {
+		/* HW failure. User needs to clean up the resource */
+		pr_err("Start HW failed\n");
+		ctx->state = CAM_CTX_READY;
+		goto end;
+	}
+	CDBG("%s: start device success\n", __func__);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated_unlock(
+	struct cam_context *ctx)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args          stop;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	/* Mask off all the incoming hardware events */
+	spin_lock(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock(&ctx->lock);
+	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+
+	/* stop hw first */
+	if (ctx_isp->hw_ctx) {
+		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in active list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx_isp->frame_id = 0;
+
+	CDBG("%s: next state %d", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	ctx->state = CAM_CTX_ACQUIRED;
+	return rc;
+}
+
+static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+
+	if (ctx_isp->hw_ctx) {
+		struct cam_hw_release_args   arg;
+
+		arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: Enter: apply req in Substate %d\n",
+		__func__, ctx_isp->substate_activated);
+	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
+		crm_ops.apply_req) {
+		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
+			crm_ops.apply_req(ctx, apply);
+	} else {
+		pr_err("%s: No handle function in activated substate %d\n",
+			__func__, ctx_isp->substate_activated);
+		rc = -EFAULT;
+	}
+
+	if (rc)
+		pr_err("%s: Apply failed in active substate %d\n",
+			__func__, ctx_isp->substate_activated);
+	return rc;
+}
+
+
+
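+/*
+ * Top level IRQ entry point for the activated state. It takes the context
+ * lock and dispatches the hardware event to the current substate handler
+ * in the substate_machine_irq table, using evt_id as the index.
+ */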
+static int __cam_isp_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	spin_lock(&ctx->lock);
+	CDBG("%s: Enter: State %d Substate %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated);
+	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+		irq_ops[evt_id]) {
+		rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+			irq_ops[evt_id](ctx_isp, evt_data);
+	} else {
+		CDBG("%s: No handle function for substate %d\n", __func__,
+			ctx_isp->substate_activated);
+	}
+	CDBG("%s: Exit: State %d Substate %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated);
+	spin_unlock(&ctx->lock);
+	return rc;
+}
+
+/* top state machine */
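+/*
+ * Indexed by the top level context state (Uninit, Available, Acquired,
+ * Ready, Activated). The release_dev and config_dev handlers suffixed
+ * with "_in_top_state" are shared across several of these states.
+ */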
+static struct cam_ctx_ops
+	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
+		},
+		.crm_ops = {
+			.link = __cam_isp_ctx_link_in_acquired,
+			.unlink = __cam_isp_ctx_unlink_in_acquired,
+			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+		},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.start_dev = __cam_isp_ctx_start_dev_in_ready,
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_ready,
+		},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+			.release_dev = __cam_isp_ctx_release_dev_in_activated,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req,
+		},
+		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+	},
+};
+
+
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_intf)
+
+{
+	int rc = -1;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		pr_err("%s: Invalid Context\n", __func__);
+		goto err;
+	}
+
+	/* ISP context setup */
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+	ctx->frame_id = 0;
+	ctx->hw_ctx = NULL;
+	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
+	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		ctx->req_base[i].req_priv = &ctx->req_isp[i];
+		ctx->req_isp[i].base = &ctx->req_base[i];
+	}
+
+	/* camera context setup */
+	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		pr_err("%s: Camera Context Base init failed\n", __func__);
+		goto err;
+	}
+
+	/* link camera context with isp context */
+	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+int cam_isp_context_deinit(struct cam_isp_context *ctx)
+{
+	int rc = 0;
+
+	if (ctx->base)
+		cam_context_deinit(ctx->base);
+
+	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
+		pr_err("%s: ISP context substate is invalid\n", __func__);
+
+	memset(ctx, 0, sizeof(*ctx));
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
new file mode 100644
index 0000000..dae1dda
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_CONTEXT_H_
+#define _CAM_ISP_CONTEXT_H_
+
+
+#include <linux/spinlock.h>
+#include <uapi/media/cam_isp.h>
+
+#include "cam_context.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/*
+ * Maximum hw resource - This number is based on the maximum
+ * output port resource. The current maximum resource number
+ * is 20.
+ */
+#define CAM_ISP_CTX_RES_MAX                     20
+
+/*
+ * Maximum configuration entry size - This is based on the
+ * worst case DUAL IFE use case plus some margin.
+ */
+#define CAM_ISP_CTX_CFG_MAX                     20
+
+/* forward declaration */
+struct cam_isp_context;
+
+/* cam isp context irq handling function type */
+typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp,
+	void *evt_data);
+
+/**
+ * enum cam_isp_ctx_activated_substate - sub states for activated
+ *
+ */
+enum cam_isp_ctx_activated_substate {
+	CAM_ISP_CTX_ACTIVATED_SOF,
+	CAM_ISP_CTX_ACTIVATED_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_EPOCH,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_HALT,
+	CAM_ISP_CTX_ACTIVATED_MAX,
+};
+
+
+/**
+ * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops:               Array of handle function pointers.
+ *
+ */
+struct cam_isp_ctx_irq_ops {
+	cam_isp_hw_event_cb_func         irq_ops[CAM_ISP_HW_EVENT_MAX];
+};
+
+/**
+ * struct cam_isp_ctx_req - ISP context request object
+ *
+ * @base:                  Common request object pointer
+ * @cfg:                   ISP hardware configuration array
+ * @num_cfg:               Number of ISP hardware configuration entries
+ * @fence_map_out:         Output fence mapping array
+ * @num_fence_map_out:     Number of the output fence map
+ * @fence_map_in:          Input fence mapping array
+ * @num_fence_map_in:      Number of input fence map
+ * @num_acked:             Count to track acked entries for output.
+ *                         If count equals the number of fence out, it means
+ *                         the request has been completed.
+ * @bubble_report:         Flag to track if bubble report is active on
+ *                         current request
+ *
+ */
+struct cam_isp_ctx_req {
+	struct cam_ctx_request          *base;
+
+	struct cam_hw_update_entry       cfg[CAM_ISP_CTX_CFG_MAX];
+	uint32_t                         num_cfg;
+	struct cam_hw_fence_map_entry    fence_map_out[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_out;
+	struct cam_hw_fence_map_entry    fence_map_in[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_in;
+	uint32_t                         num_acked;
+	int32_t                          bubble_report;
+};
+
+/**
+ * struct cam_isp_context  - ISP context object
+ *
+ * @base:                  Common context object pointer
+ * @frame_id:              Frame id tracking for the isp context
+ * @substate_activated:    Current substate for the activated state.
+ * @substate_machine:      ISP substate machine for external interface
+ * @substate_machine_irq:  ISP substate machine for irq handling
+ * @req_base:              Common request object storage
+ * @req_isp:               ISP private request object storage
+ * @hw_ctx:                HW object returned by the acquire device command
+ *
+ */
+struct cam_isp_context {
+	struct cam_context              *base;
+
+	int64_t                          frame_id;
+	uint32_t                         substate_activated;
+	struct cam_ctx_ops              *substate_machine;
+	struct cam_isp_ctx_irq_ops      *substate_machine_irq;
+
+	struct cam_ctx_request           req_base[CAM_CTX_REQ_MAX];
+	struct cam_isp_ctx_req           req_isp[CAM_CTX_REQ_MAX];
+
+	void                            *hw_ctx;
+};
+
+/**
+ * cam_isp_context_init()
+ *
+ * @brief:              Initialization function for the ISP context
+ *
+ * @ctx:                ISP context obj to be initialized
+ * @ctx_base:           Common camera context that backs this ISP context
+ * @bridge_ops:         Bridge callback function
+ * @hw_intf:            ISP hw manager interface
+ *
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *bridge_ops,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_isp_context_deinit()
+ *
+ * @brief:               Deinitialize function for the ISP context
+ *
+ * @ctx:                 ISP context obj to be deinitialized
+ *
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx);
+
+
+#endif  /* _CAM_ISP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
new file mode 100644
index 0000000..9768912
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_isp_dev.h"
+#include "cam_isp_log.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_node.h"
+
+static struct cam_isp_dev g_isp_dev;
+
+static const struct of_device_id cam_isp_dt_match[] = {
+	{
+		.compatible = "qcom,cam-isp"
+	},
+	{}
+};
+
+static int cam_isp_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+
+	/* clean up resources */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+		if (rc)
+			pr_err("%s: ISP context %d deinit failed\n",
+				__func__, i);
+	}
+
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+	if (rc)
+		pr_err("%s: Unregister failed\n", __func__);
+
+	memset(&g_isp_dev, 0, sizeof(g_isp_dev));
+	return 0;
+}
+
+static int cam_isp_dev_probe(struct platform_device *pdev)
+{
+	int rc = -1;
+	int i;
+	struct cam_hw_mgr_intf         hw_mgr_intf;
+	struct cam_node               *node;
+
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+		CAM_IFE_DEVICE_TYPE);
+	if (rc) {
+		pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+		goto err;
+	}
+	node = (struct cam_node *) g_isp_dev.sd.token;
+
+	/* Initialize the context list */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
+			&g_isp_dev.ctx[i],
+			&node->crm_node_intf,
+			&node->hw_mgr_intf);
+		if (rc) {
+			pr_err("%s: ISP context init failed!\n", __func__);
+			goto unregister;
+		}
+	}
+
+	/* Initialize the cam node */
+	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
+		CAM_ISP_DEV_NAME);
+	if (rc) {
+		pr_err("%s: ISP node init failed!\n", __func__);
+		goto unregister;
+	}
+
+	pr_info("%s: Camera ISP probe complete\n", __func__);
+
+	return 0;
+unregister:
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+err:
+	return rc;
+}
+
+
+static struct platform_driver isp_driver = {
+	.probe = cam_isp_dev_probe,
+	.remove = cam_isp_dev_remove,
+	.driver = {
+		.name = "cam_isp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_isp_dt_match,
+	},
+};
+
+static int __init cam_isp_dev_init_module(void)
+{
+	return platform_driver_register(&isp_driver);
+}
+
+static void __exit cam_isp_dev_exit_module(void)
+{
+	platform_driver_unregister(&isp_driver);
+}
+
+module_init(cam_isp_dev_init_module);
+module_exit(cam_isp_dev_exit_module);
+MODULE_DESCRIPTION("MSM ISP driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
new file mode 100644
index 0000000..95463ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_DEV_H_
+#define _CAM_ISP_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_isp_context.h"
+
+/**
+ * struct cam_isp_dev - Camera ISP V4l2 device node
+ *
+ * @sd:                    Common camera subdevice node
+ * @ctx:                   Isp base context storage
+ * @ctx_isp:               Isp private context storage
+ *
+ */
+struct cam_isp_dev {
+	struct cam_subdev          sd;
+	struct cam_context         ctx[CAM_CTX_MAX];
+	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+};
+
+#endif /* _CAM_ISP_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
new file mode 100644
index 0000000..4f5205e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_LOG_H_
+#define _CAM_ISP_LOG_H_
+
+#include <linux/kernel.h>
+
+#define ISP_TRACE_ENABLE			1
+
+#if (ISP_TRACE_ENABLE == 1)
+	#define ISP_TRACE(args...)		trace_printk(args)
+#else
+	#define ISP_TRACE(arg...)
+#endif
+
+#endif /* _CAM_ISP_LOG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
new file mode 100644
index 0000000..9f2204b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_INTF_H_
+#define _CAM_ISP_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_mgr_intf.h"
+
+/**
+ *  enum cam_isp_hw_event_type - Collection of the ISP hardware events
+ */
+enum cam_isp_hw_event_type {
+	CAM_ISP_HW_EVENT_ERROR,
+	CAM_ISP_HW_EVENT_SOF,
+	CAM_ISP_HW_EVENT_REG_UPDATE,
+	CAM_ISP_HW_EVENT_EPOCH,
+	CAM_ISP_HW_EVENT_EOF,
+	CAM_ISP_HW_EVENT_DONE,
+	CAM_ISP_HW_EVENT_MAX
+};
+
+
+/**
+ * enum cam_isp_hw_err_type - Collection of the ISP error types for
+ *                         ISP hardware event CAM_ISP_HW_EVENT_ERROR
+ */
+enum cam_isp_hw_err_type {
+	CAM_ISP_HW_ERROR_NONE,
+	CAM_ISP_HW_ERROR_OVERFLOW,
+	CAM_ISP_HW_ERROR_P2I_ERROR,
+	CAM_ISP_HW_ERROR_VIOLATION,
+	CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
+	CAM_ISP_HW_ERROR_MAX,
+};
+
+
+/**
+ * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
+ *
+ * @timestamp:             Timestamp for the SOF event
+ *
+ */
+struct cam_isp_hw_sof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_reg_update_event_data - Event payload for
+ *                         CAM_HW_EVENT_REG_UPDATE
+ *
+ * @timestamp:             Timestamp for the reg update event
+ *
+ */
+struct cam_isp_hw_reg_update_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
+ *
+ * @timestamp:             Timestamp for the epoch event
+ *
+ */
+struct cam_isp_hw_epoch_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
+ *
+ * @num_handles:           Number of resource handles
+ * @resource_handle:       Resource handle array
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_done_event_data {
+	uint32_t             num_handles;
+	uint32_t             resource_handle[
+				CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
+ *
+ * @timestamp:             Timestamp for the EOF event
+ *
+ */
+struct cam_isp_hw_eof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
+ *
+ * @error_type:            error type for the error event
+ * @timestamp:             Timestamp for the error event
+ *
+ */
+struct cam_isp_hw_error_event_data {
+	uint32_t             error_type;
+	struct timeval       timestamp;
+};
+
+/**
+ * cam_isp_hw_mgr_init()
+ *
+ * @brief:              Initialization function for the ISP hardware manager
+ *
+ * @of_node:            Device node input
+ * @hw_mgr:             Input/output structure for the ISP hardware manager
+ *                          initialization
+ *
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr);
+
+#endif /* _CAM_ISP_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
new file mode 100644
index 0000000..4e6a06e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += ife_csid_hw/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
new file mode 100644
index 0000000..1615d21f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid_dev.o cam_ife_csid_soc.o cam_ife_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid170.o cam_ife_csid_lite170.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
new file mode 100644
index 0000000..bdd59d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
@@ -0,0 +1,60 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME                    "csid_170"
+#define CAM_CSID_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+	.csid_reg = &cam_ife_csid_170_reg_offset,
+	.hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid170_dt_match[] = {
+	{
+		.compatible = "qcom,csid170",
+		.data = &cam_ife_csid170_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid170_dt_match);
+
+static struct platform_driver cam_ife_csid170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid170_driver);
+}
+
+static void __exit cam_ife_csid170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid170_driver);
+}
+
+module_init(cam_ife_csid170_init_module);
+module_exit(cam_ife_csid170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
new file mode 100644
index 0000000..8ff2a55
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -0,0 +1,295 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_170_H_
+#define _CAM_IFE_CSID_170_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_ipp_reg_offset  cam_ife_csid_170_ipp_reg_offset = {
+	.csid_ipp_irq_status_addr            = 0x30,
+	.csid_ipp_irq_mask_addr              = 0x34,
+	.csid_ipp_irq_clear_addr             = 0x38,
+	.csid_ipp_irq_set_addr               = 0x3c,
+
+	.csid_ipp_cfg0_addr                  = 0x200,
+	.csid_ipp_cfg1_addr                  = 0x204,
+	.csid_ipp_ctrl_addr                  = 0x208,
+	.csid_ipp_frm_drop_pattern_addr      = 0x20c,
+	.csid_ipp_frm_drop_period_addr       = 0x210,
+	.csid_ipp_irq_subsample_pattern_addr = 0x214,
+	.csid_ipp_irq_subsample_period_addr  = 0x218,
+	.csid_ipp_hcrop_addr                 = 0x21c,
+	.csid_ipp_vcrop_addr                 = 0x220,
+	.csid_ipp_pix_drop_pattern_addr      = 0x224,
+	.csid_ipp_pix_drop_period_addr       = 0x228,
+	.csid_ipp_line_drop_pattern_addr     = 0x22c,
+	.csid_ipp_line_drop_period_addr      = 0x230,
+	.csid_ipp_rst_strobes_addr           = 0x240,
+	.csid_ipp_status_addr                = 0x254,
+	.csid_ipp_misr_val_addr              = 0x258,
+	.csid_ipp_format_measure_cfg0_addr   = 0x270,
+	.csid_ipp_format_measure_cfg1_addr   = 0x274,
+	.csid_ipp_format_measure0_addr       = 0x278,
+	.csid_ipp_format_measure1_addr       = 0x27c,
+	.csid_ipp_format_measure2_addr       = 0x280,
+	.csid_ipp_timestamp_curr0_sof_addr   = 0x290,
+	.csid_ipp_timestamp_curr1_sof_addr   = 0x294,
+	.csid_ipp_timestamp_perv0_sof_addr   = 0x298,
+	.csid_ipp_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_ipp_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_ipp_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_ipp_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_ipp_timestamp_perv1_eof_addr   = 0x2ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+			cam_ife_csid_170_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+			cam_ife_csid_170_tpg_reg_offset = {
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+static struct cam_ife_csid_common_reg_offset
+			cam_ife_csid_170_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 3,
+	.no_pix                                       = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_170_csi2_reg_offset,
+	.ipp_reg          = &cam_ife_csid_170_ipp_reg_offset,
+	.rdi_reg = {
+		&cam_ife_csid_170_rdi_0_reg_offset,
+		&cam_ife_csid_170_rdi_1_reg_offset,
+		&cam_ife_csid_170_rdi_2_reg_offset,
+		NULL,
+		},
+	.tpg_reg = &cam_ife_csid_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
new file mode 100644
index 0000000..6306df3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -0,0 +1,2554 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_ife_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+
+/* Timeout value in msec */
+#define IFE_CSID_TIMEOUT                               1000
+
+/* TPG VC/DT values */
+#define CAM_IFE_CSID_TPG_VC_VAL                        0xA
+#define CAM_IFE_CSID_TPG_DT_VAL                        0x2B
+
+/* Timeout values in usec */
+#define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
+#define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
+
+static int cam_ife_csid_is_ipp_format_supported(
+				uint32_t decode_fmt)
+{
+	int rc = -EINVAL;
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_DPCM_10_6_10:
+	case CAM_FORMAT_DPCM_10_8_10:
+	case CAM_FORMAT_DPCM_12_6_12:
+	case CAM_FORMAT_DPCM_12_8_12:
+	case CAM_FORMAT_DPCM_14_8_14:
+	case CAM_FORMAT_DPCM_14_10_14:
+		rc = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int cam_ife_csid_get_format(uint32_t  res_id,
+	uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
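+	/* RDI paths use a fixed path format; only IPP depends on decode_fmt */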
+	if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
+		res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
+		*path_fmt = 0xf;
+		return 0;
+	}
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*path_fmt  = 0;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*path_fmt  = 1;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*path_fmt  = 2;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*path_fmt  = 3;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		*path_fmt  = 4;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		*path_fmt  = 5;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		*path_fmt  = 6;
+		*plain_fmt = 2;
+		break;
+	case CAM_FORMAT_DPCM_10_6_10:
+		*path_fmt  = 7;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_10_8_10:
+		*path_fmt  = 8;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_6_12:
+		*path_fmt  = 9;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_8_12:
+		*path_fmt  = 0xA;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_8_14:
+		*path_fmt  = 0xB;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_10_14:
+		*path_fmt  = 0xC;
+		*plain_fmt = 1;
+		break;
+	default:
+		pr_err("%s:%d: unsupported format %d\n",
+		__func__, __LINE__, decode_fmt);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
+	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
+	uint32_t res_type)
+{
+	int  rc = 0;
+	struct cam_ife_csid_cid_data    *cid_data;
+	uint32_t  i = 0, j = 0;
+
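+	/* Reuse a CID already reserved for the same vc/dt (or for TPG) */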
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		if (csid_hw->cid_res[i].res_state >=
+			CAM_ISP_RESOURCE_STATE_RESERVED) {
+			cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[i].res_priv;
+			if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+				if (cid_data->tpg_set) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			} else {
+				if (cid_data->vc == vc && cid_data->dt == dt) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (i == CAM_IFE_CSID_CID_RES_MAX) {
+		if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			pr_err("%s:%d:CSID:%d TPG CID not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+		}
+
+		for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
+			if (csid_hw->cid_res[j].res_state ==
+				CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+				cid_data = (struct cam_ife_csid_cid_data *)
+					csid_hw->cid_res[j].res_priv;
+				cid_data->vc  = vc;
+				cid_data->dt  = dt;
+				cid_data->cnt = 1;
+				csid_hw->cid_res[j].res_state =
+					CAM_ISP_RESOURCE_STATE_RESERVED;
+				*res = &csid_hw->cid_res[j];
+				CDBG("%s:%d:CSID:%d CID %d allocated\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					csid_hw->cid_res[j].res_id);
+				break;
+			}
+		}
+
+		if (j == CAM_IFE_CSID_CID_RES_MAX) {
+			pr_err("%s:%d:CSID:%d Free cid is not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+
+static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
+{
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	int rc = 0;
+	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
+		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	init_completion(&csid_hw->csid_top_complete);
+
+	/* Save interrupt mask registers values*/
+	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+	}
+
+	/* Mask all interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* enable the IPP and RDI format measure */
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);
+
+	/* perform the top CSID HW reset */
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
+		__func__, __LINE__);
+
+	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID:%d reset completion failed rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	} else {
+		rc = 0;
+	}
+
+	/*restore all interrupt masks */
+	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_reset_cfg_args  *reset)
+{
+	int rc = 0;
+	struct cam_hw_soc_info              *soc_info;
+	struct cam_isp_resource_node        *res;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t  reset_strb_addr, reset_strb_val, val, id;
+	struct completion  *complete;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res      = reset->node_res;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		if (!csid_reg->ipp_reg) {
+			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
+		complete = &csid_hw->csid_ipp_complete;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	} else {
+		id = res->res_id;
+		if (!csid_reg->rdi_reg[id]) {
+			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr =
+			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+		complete =
+			&csid_hw->csid_rdin_complete[id];
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	init_completion(complete);
+	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
+
+	/* Enable the Test gen before reset */
+	cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+	/* Reset the corresponding ife csid path */
+	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+				reset_strb_addr);
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id,  rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable Test Gen after reset*/
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+end:
+	return rc;
+
+}
+
+static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
+{
+	int rc = 0;
+	struct cam_ife_csid_cid_data       *cid_data;
+
+	CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		cid_reserv->in_port->res_type,
+		cid_reserv->in_port->lane_type,
+		cid_reserv->in_port->lane_num,
+		cid_reserv->in_port->dt,
+		cid_reserv->in_port->vc);
+
+	if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
+		pr_err("%s:%d:CSID:%d  Invalid phy sel %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d  Invalid lane type %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
+		cid_reserv->in_port->lane_num > 4) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+	if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+		cid_reserv->in_port->lane_num > 3) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* CSID CSI2 v2.0 supports VC values up to 31 and DT values up to 0x3f */
+	if (cid_reserv->in_port->dt > 0x3f ||
+		cid_reserv->in_port->vc > 0x1f) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
+		(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
+		cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
+		pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->format);
+		rc = -EINVAL;
+		goto end;
+	}
+
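+	/* An already reserved CSI2 RX must match the requested configuration */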
+	if (csid_hw->csi2_reserve_cnt) {
+		/* currently configured res type must match the requested one */
+		if (csid_hw->res_type != cid_reserv->in_port->res_type) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+			if (csid_hw->csi2_rx_cfg.lane_cfg !=
+				cid_reserv->in_port->lane_cfg  ||
+				csid_hw->csi2_rx_cfg.lane_type !=
+				cid_reserv->in_port->lane_type ||
+				csid_hw->csi2_rx_cfg.lane_num !=
+				cid_reserv->in_port->lane_num) {
+				rc = -EINVAL;
+				goto end;
+			}
+		} else {
+			if (csid_hw->tpg_cfg.decode_fmt !=
+				cid_reserv->in_port->format     ||
+				csid_hw->tpg_cfg.width !=
+				cid_reserv->in_port->left_width ||
+				csid_hw->tpg_cfg.height !=
+				cid_reserv->in_port->height     ||
+				csid_hw->tpg_cfg.test_pattern !=
+				cid_reserv->in_port->test_pattern) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	if (!csid_hw->csi2_reserve_cnt) {
+		csid_hw->res_type = cid_reserv->in_port->res_type;
+		/* Take the first CID resource*/
+		csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+		cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[0].res_priv;
+
+		csid_hw->csi2_rx_cfg.lane_cfg =
+			cid_reserv->in_port->lane_cfg;
+		csid_hw->csi2_rx_cfg.lane_type =
+			cid_reserv->in_port->lane_type;
+		csid_hw->csi2_rx_cfg.lane_num =
+			cid_reserv->in_port->lane_num;
+
+		if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			csid_hw->csi2_rx_cfg.phy_sel = 0;
+			if (cid_reserv->in_port->format >
+			    CAM_FORMAT_MIPI_RAW_16) {
+				pr_err("%s:%d: Wrong TPG format\n", __func__,
+					__LINE__);
+				rc = -EINVAL;
+				goto end;
+			}
+			csid_hw->tpg_cfg.decode_fmt =
+				cid_reserv->in_port->format;
+			csid_hw->tpg_cfg.width =
+				cid_reserv->in_port->left_width;
+			csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
+			csid_hw->tpg_cfg.test_pattern =
+				cid_reserv->in_port->test_pattern;
+			cid_data->tpg_set = 1;
+		} else {
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->in_port->res_type & 0xFF) - 1;
+		}
+
+		cid_data->vc = cid_reserv->in_port->vc;
+		cid_data->dt = cid_reserv->in_port->dt;
+		cid_data->cnt = 1;
+		cid_reserv->node_res = &csid_hw->cid_res[0];
+		csid_hw->csi2_reserve_cnt++;
+
+		CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->node_res->res_id);
+	} else {
+		rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt,
+			cid_reserv->in_port->res_type);
+		/* if success then increment the reserve count */
+		if (!rc) {
+			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+				pr_err("%s:%d:CSID%d reserve cnt reached max\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx);
+				rc = -EINVAL;
+			} else {
+				csid_hw->csi2_reserve_cnt++;
+				CDBG("%s:%d:CSID:%d CID:%d acquired\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					cid_reserv->node_res->res_id);
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+
+static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *reserve)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg    *path_data;
+	struct cam_isp_resource_node    *res;
+
+	/* CSID CSI2 v2.0 supports VC values up to 31 */
+	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
+		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			reserve->in_port->vc, reserve->in_port->dt,
+			reserve->sync_mode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				csid_hw->ipp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cam_ife_csid_is_ipp_format_supported(
+				reserve->in_port->format)) {
+			pr_err("%s:%d:CSID:%d res id:%d unsupported format %d\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* assign the IPP resource */
+		res = &csid_hw->ipp_res;
+		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+		break;
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (csid_hw->rdi_res[reserve->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				reserve->res_id,
+				csid_hw->rdi_res[reserve->res_id].res_state);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			res = &csid_hw->rdi_res[reserve->res_id];
+			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+		}
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, reserve->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	path_data = (struct cam_ife_csid_path_cfg   *)res->res_priv;
+
+	path_data->cid = reserve->cid;
+	path_data->decode_fmt = reserve->in_port->format;
+	path_data->master_idx = reserve->master_idx;
+	path_data->sync_mode = reserve->sync_mode;
+	path_data->height  = reserve->in_port->height;
+	path_data->start_line = reserve->in_port->line_start;
+	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
+		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
+	} else {
+		path_data->dt = reserve->in_port->dt;
+		path_data->vc = reserve->in_port->vc;
+	}
+
+	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->width  = reserve->in_port->left_width;
+	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->width  = reserve->in_port->right_width;
+	} else
+		path_data->crop_enable = 0;
+
+	reserve->node_res = res;
+
+end:
+	return rc;
+}
+
+static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw  *csid_hw)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t i, status, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* overflow check before increment */
+	if (csid_hw->hw_info->open_count == UINT_MAX) {
+		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	csid_hw->hw_info->open_count++;
+	if (csid_hw->hw_info->open_count > 1) {
+		CDBG("%s:%d: CSID hw has already been enabled\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	rc = cam_ife_csid_enable_soc_resources(soc_info);
+	if (rc) {
+		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+
+	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_global_reset(csid_hw);
+	if (rc) {
+		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
+			 __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/*
+	 * Reset the SW registers
+	 * SW register reset also resets the IRQ mask, so poll the IRQ status
+	 * to check for reset completion.
+	 */
+	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		pr_err("%s:%d: software register reset timeout.....\n",
+			__func__, __LINE__);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_hw_version_addr);
+	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, val);
+
+	return 0;
+
+disable_soc:
+	cam_ife_csid_disable_soc_resources(soc_info);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+	csid_hw->hw_info->open_count--;
+	return rc;
+}
+
+static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = 0;
+	struct cam_hw_soc_info             *soc_info;
+	struct cam_ife_csid_reg_offset     *csid_reg;
+
+
+	/*  Decrement ref Count */
+	if (csid_hw->hw_info->open_count)
+		csid_hw->hw_info->open_count--;
+	if (csid_hw->hw_info->open_count)
+		return rc;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	/*disable the top IRQ interrupt */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_disable_soc_resources(soc_info);
+	if (rc)
+		pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	return rc;
+}
+
+
+static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	uint32_t  val = 0;
+	struct cam_hw_soc_info    *soc_info;
+
+	csid_hw->tpg_start_cnt++;
+	if (csid_hw->tpg_start_cnt == 1) {
+		/*Enable the TPG */
+		CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+		soc_info = &csid_hw->hw_info->soc_info;
+		{
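+			/* Debug only: dump the TPG, IPP and CSI2 RX registers */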
+			uint32_t val;
+			uint32_t i;
+			uint32_t base = 0x600;
+
+			CDBG("%s:%d: ================== TPG ===============\n",
+				__func__, __LINE__);
+			for (i = 0; i < 16; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== IPP ===============\n",
+				__func__, __LINE__);
+			base = 0x200;
+			for (i = 0; i < 10; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== RX ===============\n",
+				__func__, __LINE__);
+			base = 0x100;
+			for (i = 0; i < 5; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+		}
+
+		CDBG("%s:%d: =============== TPG control ===============\n",
+			__func__, __LINE__);
+		val = (4 << 20);
+		val |= (0x80 << 8);
+		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
+		val |= 7;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_hw->csid_info->csid_reg->tpg_reg->
+			csid_tpg_ctrl_addr);
+
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
+		CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
+			0x600, val);
+	}
+
+	return 0;
+}
+
+static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_hw_soc_info    *soc_info;
+
+	if (csid_hw->tpg_start_cnt)
+		csid_hw->tpg_start_cnt--;
+
+	if (csid_hw->tpg_start_cnt)
+		return 0;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* disable the TPG */
+	if (!csid_hw->tpg_start_cnt) {
+		CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+		/*stop the TPG */
+		cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+	}
+
+	return 0;
+}
+
+
+static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_ife_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info         *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	CDBG("%s:%d CSID:%d TPG config\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx);
+
+	/* configure one DT, infinite frames */
+	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
+
+	/* vertical blanking count = 0x740, horizontal blanking count = 0x740 */
+	val = (0x740 << 12) | 0x740;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
+
+	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);
+
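+	/* frame size: width in the upper 16 bits, height in the lower 16 bits */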
+	val = csid_hw->tpg_cfg.width << 16 |
+		csid_hw->tpg_cfg.height;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);
+
+	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);
+
+	/*
+	 * decode_fmt is the same as the input resource format.
+	 * It is one larger than the value the register expects, hence the -1.
+	 */
+	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
+
+	/* select a rotate period of 5 frames */
+	val =  5 << 8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
+	/* config pix pattern */
+	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);
+
+	return 0;
+}
+
+static int cam_ife_csid_enable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_cid_data         *cid_data;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	/* overflow check before increment */
+	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
+		pr_err("%s:%d:CSID:%d CSI2 config count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;
+
+	res->res_state  = CAM_ISP_RESOURCE_STATE_STREAMING;
+	csid_hw->csi2_cfg_cnt++;
+	if (csid_hw->csi2_cfg_cnt > 1)
+		return rc;
+
+	/* rx cfg0 */
+	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
+		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+		(csid_hw->csi2_rx_cfg.lane_type << 24);
+	val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+	/* rx cfg1*/
+	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+	/* if the VC value is more than 3 then set the full-width VC mode */
+	if (cid_data->vc > 3)
+		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
+	/* enable packet ecc correction */
+	val |= 1;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		/* Config the TPG */
+		rc = cam_ife_csid_config_tpg(csid_hw, res);
+		if (rc) {
+			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+			return rc;
+		}
+	}
+
+	/* Enable the CSI2 rx interrupts */
+	val = CSID_CSI2_RX_INFO_RST_DONE |
+		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
+		pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	if (csid_hw->csi2_cfg_cnt)
+		csid_hw->csi2_cfg_cnt--;
+
+	if (csid_hw->csi2_cfg_cnt)
+		return 0;
+
+	/* Disable the CSI2 rx interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+static int cam_ife_csid_init_config_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_format = 0, val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg  *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_format);
+	if (rc)
+		return rc;
+
+	/**
+	 * Configure the IPP and enable the time stamp capture.
+	 * Enable the HW measurement blocks.
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(path_data->crop_enable & 1 <<
+		csid_reg->cmn_reg->crop_h_en_shift_val) |
+		(path_data->crop_enable & 1 <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 1) | 1;
+	val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	if (path_data->crop_enable) {
+		val = ((path_data->width +
+			path_data->start_pixel) & 0xFFFF <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_hcrop_addr);
+
+		val = ((path_data->height +
+			path_data->start_line) & 0xFFFF <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_vcrop_addr);
+	}
+
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
+	/* set irq sub sample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
+
+	/*Set master or slave IPP */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/*Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/*Set halt mode as slave and set master idx */
+		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* Enable the IPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		rc = -EINVAL;
+	}
+
+	/* Disable the IPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info            *soc_info;
+	struct cam_ife_csid_path_cfg      *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+
+	/*Resume at frame boundary */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
+		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	}
+	/* for slave mode, there is no need to resume the slave device */
+
+	/* Enable the required ipp interrupts */
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd       stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_path_cfg         *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d unsupported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		/* configure Halt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val &= ~0x3;
+		val |= stop_cmd;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+		cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* For slave mode, the halt command is taken from the master */
+
+	/* Enable the EOF interrupt for resume at boundary case */
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		init_completion(&csid_hw->csid_ipp_complete);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	}
+
+	return rc;
+}
+
+
+static int cam_ife_csid_init_config_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	id = res->res_id;
+	if (!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+		return -EINVAL;
+	}
+
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_fmt);
+	if (rc)
+		return rc;
+
+	/**
+	 * RDI path config and enable the time stamp capture
+	 * Enable the measurement blocks
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+		(path_data->crop_enable & 1 <<
+			csid_reg->cmn_reg->crop_h_en_shift_val) |
+		(path_data->crop_enable & 1 <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 2) | 3;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	if (path_data->crop_enable) {
+		val = ((path_data->width +
+			path_data->start_pixel) & 0xFFFF <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+
+		val = ((path_data->height +
+			path_data->start_line) & 0xFFFF <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+	}
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
+	/* set IRQ subsample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
+
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
+
+	/* Configure the halt mode */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the RPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	/* Disable the RDI path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t id, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	/*resume at frame boundary */
+	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the required RDI interrupts */
+	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+
+static int cam_ife_csid_disable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd                stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
+		!csid_reg->rdi_reg[res->res_id]) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d unsupported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	init_completion(&csid_hw->csid_rdin_complete[id]);
+
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	} else {
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	/*Halt the RDI path */
+	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_get_time_stamp(
+		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
+{
+	struct cam_csid_get_time_stamp_args  *time_stamp;
+	struct cam_isp_resource_node         *res;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  time_32, id;
+
+	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
+	res = time_stamp->node_res;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
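+	/*
+	 * The SOF timestamp is latched in two 32-bit registers; read the
+	 * upper word (curr1), shift it into the high 32 bits and OR in the
+	 * lower word (curr0) to build the 64-bit timestamp.
+	 */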
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	} else {
+		id = res->res_id;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	}
+
+	return 0;
+}
+
+static int cam_ife_csid_res_wait_for_halt(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	struct completion  *complete;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+		complete = &csid_hw->csid_ipp_complete;
+	else
+		complete =  &csid_hw->csid_rdin_complete[res->res_id];
+
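+	/* block until the IRQ handler signals that the path has halted */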
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, rc);
+		if (rc == 0)
+			/* continue even have timeout */
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable the interrupt */
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		id = res->res_id;
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+			CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+	/* set state to init HW */
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	return rc;
+}
+
+static int cam_ife_csid_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw_caps     *hw_caps;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
+
+	hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
+	hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
+	hw_caps->major_version = csid_reg->cmn_reg->major_version;
+	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+	CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+		hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
+		hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_ife_csid_reset(void *hw_priv,
+	void *reset_args, uint32_t arg_size)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_csid_reset_cfg_args  *reset;
+	int rc = 0;
+
+	if (!hw_priv || !reset_args || (arg_size !=
+		sizeof(struct cam_csid_reset_cfg_args))) {
+		pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reset   = (struct cam_csid_reset_cfg_args  *)reset_args;
+
+	switch (reset->reset_type) {
+	case CAM_IFE_CSID_RESET_GLOBAL:
+		rc = cam_ife_csid_global_reset(csid_hw);
+		break;
+	case CAM_IFE_CSID_RESET_PATH:
+		rc = cam_ife_csid_path_reset(csid_hw, reset);
+		break;
+	default:
+		pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
+			__LINE__, reset->reset_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_csid_hw_reserve_resource_args  *reserv;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_csid_hw_reserve_resource_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reserv = (struct cam_csid_hw_reserve_resource_args  *)reserve_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (reserv->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		rc = cam_ife_csid_path_reserve(csid_hw, reserv);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_isp_resource_node    *res;
+	struct cam_ife_csid_cid_data    *cid_data;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CDBG("%s:%d:CSID:%d res type:%d Res %d  in released state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		goto end;
+	}
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		cid_data = (struct cam_ife_csid_cid_data    *) res->res_priv;
+		if (cid_data->cnt)
+			cid_data->cnt--;
+
+		if (!cid_data->cnt)
+			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		if (csid_hw->csi2_reserve_cnt)
+			csid_hw->csi2_reserve_cnt--;
+
+		if (!csid_hw->csi2_reserve_cnt)
+			memset(&csid_hw->csi2_rx_cfg, 0,
+				sizeof(struct cam_ife_csid_csi2_rx_cfg));
+
+		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		break;
+	}
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res      = (struct cam_isp_resource_node *)init_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+
+	/* Initialize the csid hardware */
+	rc = cam_ife_csid_enable_hw(csid_hw);
+	if (rc)
+		goto end;
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_enable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+
+	if (rc)
+		cam_ife_csid_disable_hw(csid_hw);
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	res = (struct cam_isp_resource_node *)deinit_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		goto end;
+	}
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_disable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		goto end;
+	}
+
+	/* Disable CSID HW */
+	cam_ife_csid_disable_hw(csid_hw);
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)start_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		if (csid_hw->res_type ==  CAM_ISP_IFE_IN_RES_TPG)
+			rc = cam_ife_csid_tpg_start(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res;
+	struct cam_csid_hw_stop_args         *csid_stop;
+	uint32_t  i;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	/* Stop the resource first */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_CID:
+			if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+				rc = cam_ife_csid_tpg_stop(csid_hw, res);
+			break;
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+				rc = cam_ife_csid_disable_ipp_path(csid_hw,
+						res, csid_stop->stop_cmd);
+			else
+				rc = cam_ife_csid_disable_rdi_path(csid_hw,
+						res, csid_stop->stop_cmd);
+
+			break;
+		default:
+			pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
+				__LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
+	/*wait for the path to halt */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
+			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+	}
+
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+
+}
+
+static int cam_ife_csid_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+
+	return -EINVAL;
+}
+
+static int cam_ife_csid_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+	return -EINVAL;
+}
+
+static int cam_ife_csid_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+
+	if (!hw_priv || !cmd_args) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (cmd_type) {
+	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
+		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+
+	return rc;
+
+}
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
+		irq_status_rdi[4];
+
+	if (!data) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	csid_hw = (struct cam_ife_csid_hw *)data;
+
+	CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* read */
+	irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+	irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_status_addr);
+
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+	/* clear */
+	cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+	}
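+	/* issue the IRQ clear command so the status clears take effect */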
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
+		irq_status_rx);
+	CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
+		irq_status_ipp);
+
+	if (irq_status_top) {
+		CDBG("%s:%d: CSID global reset complete......Exit\n",
+			__func__, __LINE__);
+		complete(&csid_hw->csid_top_complete);
+		return IRQ_HANDLED;
+	}
+
+
+	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
+		CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+		complete(&csid_hw->csid_csi2_complete);
+	}
+
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d TG OVER  FLOW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+
+	/*read the IPP errors */
+	if (csid_reg->cmn_reg->no_pix) {
+		/* IPP reset done bit */
+		if (irq_status_ipp &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s%d: CSID IPP reset complete\n",
+				__func__, __LINE__);
+			complete(&csid_hw->csid_ipp_complete);
+		}
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
+			CDBG("%s: CSID IPP SOF received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
+			CDBG("%s: CSID IPP SOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
+			CDBG("%s: CSID IPP EOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			CDBG("%s: CSID IPP EOF received\n", __func__);
+
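+		/* EOF marks the IPP path halt at the frame boundary */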
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_ipp_complete);
+
+		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/*Stop IPP path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		if (irq_status_rdi[i] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s:%d: CSID rdi%d reset complete\n",
+				__func__, __LINE__, i);
+			complete(&csid_hw->csid_rdin_complete[i]);
+		}
+
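+		/* EOF marks the RDI path halt at the frame boundary */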
+		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_rdin_complete[i]);
+
+		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/*Stop RDI path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+		}
+	}
+
+	CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+	return IRQ_HANDLED;
+}
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	struct cam_ife_csid_path_cfg         *path_data;
+	struct cam_ife_csid_cid_data         *cid_data;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_ife_csid_hw               *ife_csid_hw = NULL;
+
+	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
+		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
+			csid_idx);
+		return rc;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
+	ife_csid_hw  = (struct cam_ife_csid_hw  *) csid_hw_info->core_info;
+
+	ife_csid_hw->hw_intf = csid_hw_intf;
+	ife_csid_hw->hw_info = csid_hw_info;
+
+	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+		ife_csid_hw->hw_intf->hw_type, csid_idx);
+
+
+	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
+	init_completion(&ife_csid_hw->hw_info->hw_complete);
+
+	init_completion(&ife_csid_hw->csid_top_complete);
+	init_completion(&ife_csid_hw->csid_csi2_complete);
+	init_completion(&ife_csid_hw->csid_ipp_complete);
+	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
+		init_completion(&ife_csid_hw->csid_rdin_complete[i]);
+
+
+	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
+			cam_ife_csid_irq, ife_csid_hw);
+	if (rc < 0) {
+		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
+			csid_idx);
+		goto err;
+	}
+
+	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
+	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
+	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
+	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
+	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
+	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
+	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
+	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
+	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
+	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
+	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
+
+	/* Initialize the CID resource */
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
+		ife_csid_hw->cid_res[i].res_id = i;
+		ife_csid_hw->cid_res[i].res_state  =
+					CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
+					GFP_KERNEL);
+		if (!cid_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->cid_res[i].res_priv = cid_data;
+	}
+
+	/* Initialize the IPP resources */
+	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
+		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+		ife_csid_hw->ipp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->ipp_res.res_priv = path_data;
+	}
+
+	/* Initialize the RDI resource */
+	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+				i++) {
+		/* res_id ranges from RDI0 to RDI3 */
+		ife_csid_hw->rdi_res[i].res_type =
+			CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->rdi_res[i].res_id = i;
+		ife_csid_hw->rdi_res[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->rdi_res[i].res_priv = path_data;
+	}
+
+	return 0;
+err:
+	if (rc) {
+		kfree(ife_csid_hw->ipp_res.res_priv);
+		for (i = 0; i <
+			ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
+			kfree(ife_csid_hw->rdi_res[i].res_priv);
+
+		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+			kfree(ife_csid_hw->cid_res[i].res_priv);
+
+	}
+
+	return rc;
+}
+
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+
+	if (!ife_csid_hw) {
+		pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+		return rc;
+	}
+
+	/* release the private data memory from resources */
+	kfree(ife_csid_hw->ipp_res.res_priv);
+	for (i = 0; i <
+		ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+		i++) {
+		kfree(ife_csid_hw->rdi_res[i].res_priv);
+	}
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+		kfree(ife_csid_hw->cid_res[i].res_priv);
+
+
+	return 0;
+}
+
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
new file mode 100644
index 0000000..d36c576
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -0,0 +1,419 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_HW_H_
+#define _CAM_IFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_ife_csid_soc.h"
+
+#define CAM_IFE_CSID_HW_RES_MAX      4
+#define CAM_IFE_CSID_CID_RES_MAX     4
+#define CAM_IFE_CSID_RDI_MAX         4
+
+#define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
+#define CSID_CSI2_RX_NFO_PHY_DL1_EOT_CAPTURED     BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
+#define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
+#define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
+#define CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED    BIT(5)
+#define CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED    BIT(6)
+#define CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED    BIT(7)
+#define CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED       BIT(8)
+#define CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED      BIT(9)
+#define CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED   BIT(10)
+#define CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION     BIT(11)
+#define CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION     BIT(12)
+#define CSID_CSI2_RX_ERROR_CPHY_PH_CRC            BIT(13)
+#define CSID_CSI2_RX_WARNING_ECC                  BIT(14)
+#define CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW    BIT(15)
+#define CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW    BIT(16)
+#define CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW    BIT(17)
+#define CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW    BIT(18)
+#define CSID_CSI2_RX_ERROR_CRC                    BIT(19)
+#define CSID_CSI2_RX_ERROR_ECC                    BIT(20)
+#define CSID_CSI2_RX_ERROR_MMAPPED_VC_DT          BIT(21)
+#define CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT         BIT(22)
+#define CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW       BIT(23)
+#define CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME        BIT(24)
+#define CSID_CSI2_RX_INFO_TG_DONE                 BIT(25)
+#define CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW       BIT(26)
+#define CSID_CSI2_RX_INFO_RST_DONE                BIT(27)
+
+#define CSID_PATH_INFO_RST_DONE                   BIT(1)
+#define CSID_PATH_ERROR_FIFO_OVERFLOW             BIT(2)
+#define CSID_PATH_INFO_SUBSAMPLED_EOF             BIT(3)
+#define CSID_PATH_INFO_SUBSAMPLED_SOF             BIT(4)
+#define CSID_PATH_INFO_FRAME_DROP_EOF             BIT(5)
+#define CSID_PATH_INFO_FRAME_DROP_EOL             BIT(6)
+#define CSID_PATH_INFO_FRAME_DROP_SOL             BIT(7)
+#define CSID_PATH_INFO_FRAME_DROP_SOF             BIT(8)
+#define CSID_PATH_INFO_INPUT_EOF                  BIT(9)
+#define CSID_PATH_INFO_INPUT_EOL                  BIT(10)
+#define CSID_PATH_INFO_INPUT_SOL                  BIT(11)
+#define CSID_PATH_INFO_INPUT_SOF                  BIT(12)
+#define CSID_PATH_ERROR_PIX_COUNT                 BIT(13)
+#define CSID_PATH_ERROR_LINE_COUNT                BIT(14)
+
+enum cam_csid_path_halt_mode {
+	CSID_HALT_MODE_INTERNAL,
+	CSID_HALT_MODE_GLOBAL,
+	CSID_HALT_MODE_MASTER,
+	CSID_HALT_MODE_SLAVE,
+};
+
+
+struct cam_ife_csid_ipp_reg_offset {
+	/*Image pixel path register offsets*/
+	uint32_t csid_ipp_irq_status_addr;
+	uint32_t csid_ipp_irq_mask_addr;
+	uint32_t csid_ipp_irq_clear_addr;
+	uint32_t csid_ipp_irq_set_addr;
+
+	uint32_t csid_ipp_cfg0_addr;
+	uint32_t csid_ipp_cfg1_addr;
+	uint32_t csid_ipp_ctrl_addr;
+	uint32_t csid_ipp_frm_drop_pattern_addr;
+	uint32_t csid_ipp_frm_drop_period_addr;
+	uint32_t csid_ipp_irq_subsample_pattern_addr;
+	uint32_t csid_ipp_irq_subsample_period_addr;
+	uint32_t csid_ipp_hcrop_addr;
+	uint32_t csid_ipp_vcrop_addr;
+	uint32_t csid_ipp_pix_drop_pattern_addr;
+	uint32_t csid_ipp_pix_drop_period_addr;
+	uint32_t csid_ipp_line_drop_pattern_addr;
+	uint32_t csid_ipp_line_drop_period_addr;
+	uint32_t csid_ipp_rst_strobes_addr;
+	uint32_t csid_ipp_status_addr;
+	uint32_t csid_ipp_misr_val_addr;
+	uint32_t csid_ipp_format_measure_cfg0_addr;
+	uint32_t csid_ipp_format_measure_cfg1_addr;
+	uint32_t csid_ipp_format_measure0_addr;
+	uint32_t csid_ipp_format_measure1_addr;
+	uint32_t csid_ipp_format_measure2_addr;
+	uint32_t csid_ipp_timestamp_curr0_sof_addr;
+	uint32_t csid_ipp_timestamp_curr1_sof_addr;
+	uint32_t csid_ipp_timestamp_perv0_sof_addr;
+	uint32_t csid_ipp_timestamp_perv1_sof_addr;
+	uint32_t csid_ipp_timestamp_curr0_eof_addr;
+	uint32_t csid_ipp_timestamp_curr1_eof_addr;
+	uint32_t csid_ipp_timestamp_perv0_eof_addr;
+	uint32_t csid_ipp_timestamp_perv1_eof_addr;
+
+	/* configuration */
+	uint32_t  pix_store_en_shift_val;
+};
+
+struct cam_ife_csid_rdi_reg_offset {
+	uint32_t csid_rdi_irq_status_addr;
+	uint32_t csid_rdi_irq_mask_addr;
+	uint32_t csid_rdi_irq_clear_addr;
+	uint32_t csid_rdi_irq_set_addr;
+
+	/*RDI N register address */
+	uint32_t csid_rdi_cfg0_addr;
+	uint32_t csid_rdi_cfg1_addr;
+	uint32_t csid_rdi_ctrl_addr;
+	uint32_t csid_rdi_frm_drop_pattern_addr;
+	uint32_t csid_rdi_frm_drop_period_addr;
+	uint32_t csid_rdi_irq_subsample_pattern_addr;
+	uint32_t csid_rdi_irq_subsample_period_addr;
+	uint32_t csid_rdi_rpp_hcrop_addr;
+	uint32_t csid_rdi_rpp_vcrop_addr;
+	uint32_t csid_rdi_rpp_pix_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_pix_drop_period_addr;
+	uint32_t csid_rdi_rpp_line_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_line_drop_period_addr;
+	uint32_t csid_rdi_yuv_chroma_conversion_addr;
+	uint32_t csid_rdi_rst_strobes_addr;
+	uint32_t csid_rdi_status_addr;
+	uint32_t csid_rdi_misr_val0_addr;
+	uint32_t csid_rdi_misr_val1_addr;
+	uint32_t csid_rdi_misr_val2_addr;
+	uint32_t csid_rdi_misr_val3_addr;
+	uint32_t csid_rdi_format_measure_cfg0_addr;
+	uint32_t csid_rdi_format_measure_cfg1_addr;
+	uint32_t csid_rdi_format_measure0_addr;
+	uint32_t csid_rdi_format_measure1_addr;
+	uint32_t csid_rdi_format_measure2_addr;
+	uint32_t csid_rdi_timestamp_curr0_sof_addr;
+	uint32_t csid_rdi_timestamp_curr1_sof_addr;
+	uint32_t csid_rdi_timestamp_prev0_sof_addr;
+	uint32_t csid_rdi_timestamp_prev1_sof_addr;
+	uint32_t csid_rdi_timestamp_curr0_eof_addr;
+	uint32_t csid_rdi_timestamp_curr1_eof_addr;
+	uint32_t csid_rdi_timestamp_prev0_eof_addr;
+	uint32_t csid_rdi_timestamp_prev1_eof_addr;
+	uint32_t csid_rdi_byte_cntr_ping_addr;
+	uint32_t csid_rdi_byte_cntr_pong_addr;
+};
+
+struct cam_ife_csid_csi2_rx_reg_offset {
+	uint32_t csid_csi2_rx_irq_status_addr;
+	uint32_t csid_csi2_rx_irq_mask_addr;
+	uint32_t csid_csi2_rx_irq_clear_addr;
+	uint32_t csid_csi2_rx_irq_set_addr;
+	uint32_t csid_csi2_rx_cfg0_addr;
+	uint32_t csid_csi2_rx_cfg1_addr;
+	uint32_t csid_csi2_rx_capture_ctrl_addr;
+	uint32_t csid_csi2_rx_rst_strobes_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg0_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg1_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_lane0_misr_addr;
+	uint32_t csid_csi2_rx_lane1_misr_addr;
+	uint32_t csid_csi2_rx_lane2_misr_addr;
+	uint32_t csid_csi2_rx_lane3_misr_addr;
+	uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+	uint32_t csid_csi2_rx_stats_ecc_addr;
+	uint32_t csid_csi2_rx_total_crc_err_addr;
+
+	/*configurations */
+	uint32_t csi2_rst_srb_all;
+	uint32_t csi2_rst_done_shift_val;
+	uint32_t csi2_irq_mask_all;
+	uint32_t csi2_misr_enable_shift_val;
+	uint32_t csi2_vc_mode_shift_val;
+};
+
+struct cam_ife_csid_csi2_tpg_reg_offset {
+	uint32_t csid_tpg_ctrl_addr;
+	uint32_t csid_tpg_vc_cfg0_addr;
+	uint32_t csid_tpg_vc_cfg1_addr;
+	uint32_t csid_tpg_lfsr_seed_addr;
+	uint32_t csid_tpg_dt_n_cfg_0_addr;
+	uint32_t csid_tpg_dt_n_cfg_1_addr;
+	uint32_t csid_tpg_dt_n_cfg_2_addr;
+	uint32_t csid_tpg_color_bars_cfg_addr;
+	uint32_t csid_tpg_color_box_cfg_addr;
+	uint32_t csid_tpg_common_gen_cfg_addr;
+	uint32_t csid_tpg_cgen_n_cfg_addr;
+	uint32_t csid_tpg_cgen_n_x0_addr;
+	uint32_t csid_tpg_cgen_n_x1_addr;
+	uint32_t csid_tpg_cgen_n_x2_addr;
+	uint32_t csid_tpg_cgen_n_xy_addr;
+	uint32_t csid_tpg_cgen_n_y1_addr;
+	uint32_t csid_tpg_cgen_n_y2_addr;
+
+	/*configurations */
+	uint32_t tpg_dtn_cfg_offset;
+	uint32_t tpg_cgen_cfg_offset;
+};
+
+struct cam_ife_csid_common_reg_offset {
+	/* MIPI CSID registers */
+	uint32_t csid_hw_version_addr;
+	uint32_t csid_cfg0_addr;
+	uint32_t csid_ctrl_addr;
+	uint32_t csid_reset_addr;
+	uint32_t csid_rst_strobes_addr;
+
+	uint32_t csid_test_bus_ctrl_addr;
+	uint32_t csid_top_irq_status_addr;
+	uint32_t csid_top_irq_mask_addr;
+	uint32_t csid_top_irq_clear_addr;
+	uint32_t csid_top_irq_set_addr;
+	uint32_t csid_irq_cmd_addr;
+
+	/*configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t no_rdis;
+	uint32_t no_pix;
+	uint32_t csid_rst_stb;
+	uint32_t csid_rst_stb_sw_all;
+	uint32_t path_rst_stb_all;
+	uint32_t path_rst_done_shift_val;
+	uint32_t path_en_shift_val;
+	uint32_t dt_id_shift_val;
+	uint32_t vc_shift_val;
+	uint32_t dt_shift_val;
+	uint32_t fmt_shift_val;
+	uint32_t plain_fmt_shit_val;
+	uint32_t crop_v_en_shift_val;
+	uint32_t crop_h_en_shift_val;
+	uint32_t crop_shift;
+	uint32_t ipp_irq_mask_all;
+	uint32_t rdi_irq_mask_all;
+};
+
+/**
+ * struct cam_ife_csid_reg_offset- CSID instance register info
+ *
+ * @cmn_reg:  csid common registers info
+ * @csi2_reg: csi2 rx register offset information
+ * @ipp_reg:  ipp register offset information
+ * @rdi_reg:  rdi register offset information
+ * @tpg_reg:  tpg register offset information
+ *
+ */
+struct cam_ife_csid_reg_offset {
+	struct cam_ife_csid_common_reg_offset   *cmn_reg;
+	struct cam_ife_csid_csi2_rx_reg_offset  *csi2_reg;
+	struct cam_ife_csid_ipp_reg_offset      *ipp_reg;
+	struct cam_ife_csid_rdi_reg_offset      *rdi_reg[CAM_IFE_CSID_RDI_MAX];
+	struct cam_ife_csid_csi2_tpg_reg_offset *tpg_reg;
+};
+
+
+/**
+ * struct cam_ife_csid_hw_info- CSID HW info
+ *
+ * @csid_reg:        csid register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ *
+ */
+struct cam_ife_csid_hw_info {
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t                             hw_dts_version;
+	uint32_t                             csid_max_clk;
+
+};
+
+
+
+/**
+ * struct cam_ife_csid_csi2_rx_cfg- csid csi2 rx configuration data
+ * @phy_sel:     input resource type for sensor only
+ * @lane_type:   lane type: c-phy or d-phy
+ * @lane_num :   active lane number
+ * @lane_cfg:    lane configurations: 4 bits per lane
+ *
+ */
+struct cam_ife_csid_csi2_rx_cfg  {
+	uint32_t                        phy_sel;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+};
+
+/**
+ * struct             cam_ife_csid_tpg_cfg- csid tpg configuration data
+ * @width:            width
+ * @height:           height
+ * @test_pattern :    pattern
+ * @decode_format:    decode format
+ *
+ */
+struct cam_ife_csid_tpg_cfg  {
+	uint32_t                        width;
+	uint32_t                        height;
+	uint32_t                        test_pattern;
+	uint32_t                        decode_fmt;
+};
+
+/**
+ * struct cam_ife_csid_cid_data- cid configuration private data
+ *
+ * @vc:      virtual channel
+ * @dt:      Data type
+ * @cnt:     cid resource reference count.
+ * @tpg_set: tpg used for this cid resource
+ *
+ */
+struct cam_ife_csid_cid_data {
+	uint32_t                     vc;
+	uint32_t                     dt;
+	uint32_t                     cnt;
+	uint32_t                     tpg_set;
+};
+
+
+/**
+ * struct cam_ife_csid_path_cfg- csid path configuration details. It is stored
+ *                          as private data for IPP/ RDI paths
+ * @vc :            Virtual channel number
+ * @dt :            Data type number
+ * @cid:            cid number, same as the DT_ID number in HW
+ * @decode_fmt:     input decode format
+ * @crop_enable:    crop is enable or disabled, if enabled
+ *                  then remaining parameters are valid.
+ * @start_pixel:    start pixel
+ * @width:          width
+ * @start_line:     start line
+ * @height:         height
+ * @sync_mode:      Applicable for IPP/RDI path reservation.
+ *                  Reserve the path as master IPP (set value 1) or
+ *                  slave IPP (set value 2); for RDI, set the mode
+ *                  to none.
+ * @master_idx:     For slave reservation, give the master IFE instance
+ *                  index; the slave synchronizes with the master's
+ *                  start and stop operations.
+ *
+ */
+struct cam_ife_csid_path_cfg {
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        cid;
+	uint32_t                        decode_fmt;
+	bool                            crop_enable;
+	uint32_t                        start_pixel;
+	uint32_t                        width;
+	uint32_t                        start_line;
+	uint32_t                        height;
+	enum cam_isp_hw_sync_mode       sync_mode;
+	uint32_t                        master_idx;
+};
+
+/**
+ * struct cam_ife_csid_hw- csid hw device resources data
+ *
+ * @hw_intf:                  contain the csid hw interface information
+ * @hw_info:                  csid hw device information
+ * @csid_info:                csid hw specific information
+ * @res_type:                 CSID input resource type
+ * @csi2_rx_cfg:              Csi2 rx decoder configuration for csid
+ * @tpg_cfg:                  TPG configuration
+ * @csi2_reserve_cnt:         CSI2 reservation count value
+ * @csi2_cfg_cnt:             csi2 configuration count
+ * @tpg_start_cnt:            tpg start count
+ * @ipp_res:                  image pixel path resource
+ * @rdi_res:                  raw dump image path resources
+ * @cid_res:                  cid resources state
+ * @csid_top_complete:        csid top reset completion
+ * @csid_csi2_complete:       csi2 reset completion
+ * @csid_ipp_complete:        ipp reset/halt completion
+ * @csid_rdin_complete:       rdi n reset/halt completion
+ *
+ */
+struct cam_ife_csid_hw {
+	struct cam_hw_intf              *hw_intf;
+	struct cam_hw_info              *hw_info;
+	struct cam_ife_csid_hw_info     *csid_info;
+	uint32_t                         res_type;
+	struct cam_ife_csid_csi2_rx_cfg  csi2_rx_cfg;
+	struct cam_ife_csid_tpg_cfg      tpg_cfg;
+	uint32_t                         csi2_reserve_cnt;
+	uint32_t                         csi2_cfg_cnt;
+	uint32_t                         tpg_start_cnt;
+	struct cam_isp_resource_node     ipp_res;
+	struct cam_isp_resource_node     rdi_res[CAM_IFE_CSID_RDI_MAX];
+	struct cam_isp_resource_node     cid_res[CAM_IFE_CSID_CID_RES_MAX];
+	struct completion                csid_top_complete;
+	struct completion                csid_csi2_complete;
+	struct completion                csid_ipp_complete;
+	struct completion    csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+};
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx);
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw);
+
+#endif /* _CAM_IFE_CSID_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
new file mode 100644
index 0000000..003d83f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_hw_intf.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
+	0, 0, 0, 0};
+
+int cam_ife_csid_probe(struct platform_device *pdev)
+{
+
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_ife_csid_hw_info    *csid_hw_data = NULL;
+	uint32_t                        csid_dev_idx;
+	int                             rc = 0;
+
+	CDBG("%s:%d probe called\n", __func__, __LINE__);
+
+	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+	if (!csid_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	csid_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!csid_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	csid_dev = kzalloc(sizeof(struct cam_ife_csid_hw), GFP_KERNEL);
+	if (!csid_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get ife csid hw index */
+	of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx);
+	/* get ife csid hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("%s:%d No matching table for the IFE CSID HW!\n",
+			__func__, __LINE__);
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	csid_hw_intf->hw_idx = csid_dev_idx;
+	csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID;
+	csid_hw_intf->hw_priv = csid_hw_info;
+
+	csid_hw_info->core_info = csid_dev;
+	csid_hw_info->soc_info.pdev = pdev;
+
+	csid_hw_data = (struct cam_ife_csid_hw_info  *)match_dev->data;
+	/* need to set up the pdev before calling the ife hw probe init */
+	csid_dev->csid_info = csid_hw_data;
+
+	rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, csid_dev);
+	CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+		csid_hw_intf->hw_idx);
+
+
+	if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_RES_MAX)
+		cam_ife_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+	else
+		goto free_dev;
+
+	return 0;
+
+free_dev:
+	kfree(csid_dev);
+free_hw_info:
+	kfree(csid_hw_info);
+free_hw_intf:
+	kfree(csid_hw_intf);
+err:
+	return rc;
+}
+
+int cam_ife_csid_remove(struct platform_device *pdev)
+{
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+
+	csid_dev = (struct cam_ife_csid_hw *)platform_get_drvdata(pdev);
+	csid_hw_intf = csid_dev->hw_intf;
+	csid_hw_info = csid_dev->hw_info;
+
+	CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+		csid_dev->hw_intf->hw_idx);
+
+	cam_ife_csid_hw_deinit(csid_dev);
+
+	/*release the csid device memory */
+	kfree(csid_dev);
+	kfree(csid_hw_info);
+	kfree(csid_hw_intf);
+	return 0;
+}
+
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
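+	/* return the hw interface registered at probe time, if any */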
+	if (cam_ife_csid_hw_list[hw_idx]) {
+		*ife_csid_hw = cam_ife_csid_hw_list[hw_idx];
+	} else {
+		*ife_csid_hw = NULL;
+		rc = -1;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
new file mode 100644
index 0000000..3b213e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_DEV_H_
+#define _CAM_IFE_CSID_DEV_H_
+
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data);
+
+int cam_ife_csid_probe(struct platform_device *pdev);
+int cam_ife_csid_remove(struct platform_device *pdev);
+
+#endif /*_CAM_IFE_CSID_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
new file mode 100644
index 0000000..4ed4da5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite170.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+
+#define CAM_CSID_LITE_DRV_NAME                    "csid_lite_170"
+#define CAM_CSID_LITE_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite170_hw_info = {
+	.csid_reg = &cam_ife_csid_lite_170_reg_offset,
+	.hw_dts_version = CAM_CSID_LITE_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid_lite170_dt_match[] = {
+	{
+		.compatible = "qcom,csid-lite170",
+		.data = &cam_ife_csid_lite170_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite170_dt_match);
+
+static struct platform_driver cam_ife_csid_lite170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_LITE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid_lite170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid_lite170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid_lite170_driver);
+}
+
+static void __exit cam_ife_csid_lite170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid_lite170_driver);
+}
+
+module_init(cam_ife_csid_lite170_init_module);
+module_exit(cam_ife_csid_lite170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
new file mode 100644
index 0000000..e857f8b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE170_H_
+#define _CAM_IFE_CSID_LITE170_H_
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_0_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x30,
+	.csid_rdi_irq_mask_addr                   = 0x34,
+	.csid_rdi_irq_clear_addr                  = 0x38,
+	.csid_rdi_irq_set_addr                    = 0x3c,
+	.csid_rdi_cfg0_addr                       = 0x200,
+	.csid_rdi_cfg1_addr                       = 0x204,
+	.csid_rdi_ctrl_addr                       = 0x208,
+	.csid_rdi_frm_drop_pattern_addr           = 0x20c,
+	.csid_rdi_frm_drop_period_addr            = 0x210,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x214,
+	.csid_rdi_irq_subsample_period_addr       = 0x218,
+	.csid_rdi_rpp_hcrop_addr                  = 0x21c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x220,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x224,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x228,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x22c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x230,
+	.csid_rdi_rst_strobes_addr                = 0x240,
+	.csid_rdi_status_addr                     = 0x250,
+	.csid_rdi_misr_val0_addr                  = 0x254,
+	.csid_rdi_misr_val1_addr                  = 0x258,
+	.csid_rdi_misr_val2_addr                  = 0x25c,
+	.csid_rdi_misr_val3_addr                  = 0x260,
+	.csid_rdi_format_measure_cfg0_addr        = 0x270,
+	.csid_rdi_format_measure_cfg1_addr        = 0x274,
+	.csid_rdi_format_measure0_addr            = 0x278,
+	.csid_rdi_format_measure1_addr            = 0x27c,
+	.csid_rdi_format_measure2_addr            = 0x280,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x290,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x294,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x298,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x29c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x2a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x2a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x2a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x2ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x2e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x2e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_1_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_2_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x434,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_3_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+	cam_ife_csid_lite_170_csi2_reg_offset = {
+
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+	cam_ife_csid_lite_170_tpg_reg_offset = {
+
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+
+static struct cam_ife_csid_common_reg_offset
+	cam_csid_lite_170_cmn_reg_offset = {
+
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 4,
+	.no_pix                                       = 0,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_lite_170_reg_offset = {
+	.cmn_reg          = &cam_csid_lite_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_lite_170_csi2_reg_offset,
+	.ipp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_lite_170_rdi_0_reg_offset,
+		&cam_ife_csid_lite_170_rdi_1_reg_offset,
+		&cam_ife_csid_lite_170_rdi_2_reg_offset,
+		&cam_ife_csid_lite_170_rdi_3_reg_offset,
+		},
+	.tpg_reg = &cam_ife_csid_lite_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE170_H_ */
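The lite-170 CSID exposes four RDI paths and no pixel path (no_rdis = 4, no_pix = 0, ipp_reg = NULL above), so per-path register programming indexes rdi_reg[] by the RDI id. A minimal sketch of how a caller could resolve a per-path register, assuming an ioremapped register base and <linux/io.h>; this helper is illustrative only and not part of this patch:

/* Illustrative only -- not part of this patch. */
static inline uint32_t csid_lite_read_rdi_cfg0(void __iomem *mem_base,
	uint32_t rdi_id)
{
	const struct cam_ife_csid_reg_offset *csid_reg =
		&cam_ife_csid_lite_170_reg_offset;

	if (rdi_id >= csid_reg->cmn_reg->no_rdis)
		return 0;

	return readl_relaxed(mem_base +
		csid_reg->rdi_reg[rdi_id]->csid_rdi_cfg0_addr);
}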
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
new file mode 100644
index 0000000..f07c45e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -0,0 +1,94 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_ife_csid_soc.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	struct csid_device_soc_info *csid_soc_info = NULL;
+	int rc = 0;
+
+	of_node = soc_info->pdev->dev.of_node;
+	csid_soc_info = (struct csid_device_soc_info *)soc_info->soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+static int cam_ife_csid_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler,
+	void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_ife_csid_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	/* Need to see if we want to post-process the clock list */
+
+	rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	CDBG("%s: mem_base is 0x%llx\n", __func__,
+		(uint64_t) soc_info->reg_map[0].mem_base);
+
+	return rc;
+}
+
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("%s: enable platform failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("%s: Disable platform failed\n", __func__);
+
+	return rc;
+}
+
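A rough sketch of how a CSID hw layer might drive the SOC helpers above during probe and power-up (illustrative only, not part of this patch; the IRQ handler and the soc_info/priv arguments are placeholders):

/* Illustrative only -- not part of this patch. */
static irqreturn_t example_csid_irq(int irq, void *data)
{
	/* a real handler would read and clear the CSID irq status here */
	return IRQ_HANDLED;
}

static int example_csid_setup(struct cam_hw_soc_info *soc_info, void *priv)
{
	int rc;

	/* parse DT and claim clocks/regulators/irq/register space */
	rc = cam_ife_csid_init_soc_resources(soc_info, example_csid_irq, priv);
	if (rc)
		return rc;

	/* power up before any register access */
	rc = cam_ife_csid_enable_soc_resources(soc_info);
	if (rc)
		return rc;

	/* ... program the hardware ... */

	return cam_ife_csid_disable_soc_resources(soc_info);
}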
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
new file mode 100644
index 0000000..218e05a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_SOC_H_
+#define _CAM_IFE_CSID_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/**
+ * struct csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage:       csi vdd voltage value
+ *
+ */
+struct csid_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_ife_csid_init_soc_resources()
+ *
+ * @brief:                 csid initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ * @csid_irq_handler:      irq handler function to be registered
+ * @irq_data:              irq data for the callback function
+ *
+ */
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_enable_soc_resources()
+ *
+ * @brief:                 csid soc resource enable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info);
+
+/**
+ * cam_ife_csid_disable_soc_resources()
+ *
+ * @brief:                 csid soc resource disable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
new file mode 100644
index 0000000..ecc6f0e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_CSID_HW_INTF_H__
+#define __CAM_CSID_HW_INTF_H__
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX IFE CSID instance */
+#define CAM_IFE_CSID_HW_NUM_MAX                        4
+
+
+/**
+ * enum cam_ife_pix_path_res_id - Specify the csid path
+ */
+enum cam_ife_pix_path_res_id {
+	CAM_IFE_PIX_PATH_RES_RDI_0,
+	CAM_IFE_PIX_PATH_RES_RDI_1,
+	CAM_IFE_PIX_PATH_RES_RDI_2,
+	CAM_IFE_PIX_PATH_RES_RDI_3,
+	CAM_IFE_PIX_PATH_RES_IPP,
+	CAM_IFE_PIX_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_ife_cid_res_id - Specify the csid cid
+ */
+enum cam_ife_cid_res_id {
+	CAM_IFE_CSID_CID_0,
+	CAM_IFE_CSID_CID_1,
+	CAM_IFE_CSID_CID_2,
+	CAM_IFE_CSID_CID_3,
+	CAM_IFE_CSID_CID_MAX,
+};
+
+
+/**
+ * struct cam_ife_csid_hw_caps - CSID hw capability
+ * @no_rdis :       number of rdis supported by CSID HW device
+ * @no_pix:         number of pixel path supported by CSID HW device
+ * @major_version : major version
+ * @minor_version:  minor version
+ * @version_incr:   version increment
+ *
+ */
+struct cam_ife_csid_hw_caps {
+	uint32_t      no_rdis;
+	uint32_t      no_pix;
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+
+/**
+ * struct cam_csid_hw_reserve_resource_args - hw reserve
+ * @res_type :  resource type, CID or PATH
+ *              if type is CID, res_id is not required,
+ *              if type is PATH, res_id needs to be filled
+ * @res_id  :  res id to be reserved
+ * @in_port : input port resource info
+ * @sync_mode : Sync mode
+ *              Sync mode could be master, slave or none
+ * @master_idx: master device index to be configured in the slave path
+ *              for master path, this value is not required.
+ *              only slave need to configure the master index value
+ * @cid:        cid (DT_ID) value for path, this is applicable for CSID path
+ *              reserve
+ * @node_res :  reserved resource structure pointer
+ *
+ */
+struct cam_csid_hw_reserve_resource_args {
+	enum cam_isp_resource_type                res_type;
+	uint32_t                                  res_id;
+	struct cam_isp_in_port_info              *in_port;
+	enum cam_isp_hw_sync_mode                 sync_mode;
+	uint32_t                                  master_idx;
+	uint32_t                                  cid;
+	struct cam_isp_resource_node             *node_res;
+
+};
+
+
+/**
+ *  enum cam_ife_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_ife_csid_halt_cmd {
+	CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+	CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+	CAM_CSID_HALT_IMMEDIATELY,
+	CAM_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_csid_hw_stop_args - stop all resources
+ * @stop_cmd : Applicable only for PATH resources
+ *             if the stop command is set to halt immediately, the driver
+ *             stops the path immediately and the manager needs to reset the
+ *             path after the immediate halt
+ *             if the stop command is set to halt at frame boundary, the
+ *             driver programs a halt at frame boundary and waits for the
+ *             frame boundary
+ * @node_res :  resource pointer array (i.e. CID or CSID path)
+ * @num_res :   number of resources to be stopped
+ *
+ */
+struct cam_csid_hw_stop_args {
+	enum cam_ife_csid_halt_cmd                stop_cmd;
+	struct cam_isp_resource_node            **node_res;
+	uint32_t                                  num_res;
+};
+
+/**
+ * enum cam_ife_csid_reset_type - Specify the reset type
+ */
+enum cam_ife_csid_reset_type {
+	CAM_IFE_CSID_RESET_GLOBAL,
+	CAM_IFE_CSID_RESET_PATH,
+	CAM_IFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_csid_reset_cfg_args - csid reset configuration
+ * @reset_type : Global reset or path reset
+ * @node_res :   resource that needs to be reset
+ *
+ */
+struct cam_csid_reset_cfg_args {
+	enum cam_ife_csid_reset_type   reset_type;
+	struct cam_isp_resource_node  *node_res;
+};
+
+/**
+ * struct cam_csid_get_time_stamp_args - time stamp capture arguments
+ * @node_res :   resource to get the time stamp from
+ * @time_stamp_val : captured time stamp
+ *
+ */
+struct cam_csid_get_time_stamp_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           time_stamp_val;
+};
+
+/**
+ * enum cam_ife_csid_cmd_type - Specify the csid command
+ */
+enum cam_ife_csid_cmd_type {
+	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_IFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_ife_csid_hw_init()
+ *
+ * @brief:               Initialize function for the CSID hardware
+ *
+ * @ife_csid_hw:         CSID hardware instance returned
+ * @hw_idx:              CSID hardware instance id
+ */
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx);
+
+#endif /* __CAM_CSID_HW_INTF_H__ */
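For reference, reserving an RDI path means filling cam_csid_hw_reserve_resource_args roughly as below (illustrative only, not part of this patch; CAM_ISP_RESOURCE_PIX_PATH and CAM_ISP_HW_SYNC_NONE are assumed names from cam_isp_hw.h, which is not shown in this hunk):

/* Illustrative only -- not part of this patch. */
static void example_fill_rdi0_reserve(
	struct cam_csid_hw_reserve_resource_args *args,
	struct cam_isp_in_port_info *in_port, uint32_t cid)
{
	memset(args, 0, sizeof(*args));
	args->res_type   = CAM_ISP_RESOURCE_PIX_PATH;  /* assumed enum name */
	args->res_id     = CAM_IFE_PIX_PATH_RES_RDI_0;
	args->in_port    = in_port;
	args->sync_mode  = CAM_ISP_HW_SYNC_NONE;       /* assumed enum name */
	args->master_idx = 0;        /* unused when there is no sync */
	args->cid        = cid;      /* from a prior CID reserve */
	args->node_res   = NULL;     /* filled in by the CSID hw on success */
}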
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index f8c864f..e6da6ca 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1 +1,9 @@
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o  cam_req_mgr_workq.o
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o \
+				cam_req_mgr_util.o \
+				cam_req_mgr_core.o \
+				cam_req_mgr_workq.o \
+				cam_mem_mgr.o \
+				cam_req_mgr_timer.o \
+				cam_req_mgr_debug.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
new file mode 100644
index 0000000..9c030ab
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -0,0 +1,968 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
+
+#ifdef CONFIG_MEM_MGR_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <asm/cacheflush.h>
+
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
+static struct cam_mem_table tbl;
+
+static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
+	uint64_t *vaddr,
+	size_t *len)
+{
+	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
+	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+		pr_err("kernel map fail");
+		return -ENOSPC;
+	}
+
+	if (ion_handle_get_size(tbl.client, hdl, len)) {
+		pr_err("kernel get len failed");
+		ion_unmap_kernel(tbl.client, hdl);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_get_dma_dir(uint32_t flags)
+{
+	int rc = -EINVAL;
+
+	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
+		rc = DMA_TO_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
+		rc = DMA_FROM_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		rc = DMA_BIDIRECTIONAL;
+
+	return rc;
+}
+
+static int cam_mem_util_client_create(void)
+{
+	int rc = 0;
+
+	tbl.client = msm_ion_client_create("camera_global_pool");
+	if (IS_ERR_OR_NULL(tbl.client)) {
+		pr_err("fail to create client\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void cam_mem_util_client_destroy(void)
+{
+	ion_client_destroy(tbl.client);
+	tbl.client = NULL;
+}
+
+int cam_mem_mgr_init(void)
+{
+	int rc;
+	int i;
+	int bitmap_size;
+
+	memset(tbl.bufq, 0, sizeof(tbl.bufq));
+
+	rc = cam_mem_util_client_create();
+	if (rc < 0) {
+		pr_err("fail to create ion client\n");
+		goto client_fail;
+	}
+
+	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
+	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!tbl.bitmap) {
+		rc = -ENOMEM;
+		goto bitmap_fail;
+	}
+	tbl.bits = bitmap_size * BITS_PER_BYTE;
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].buf_handle = -1;
+	}
+	mutex_init(&tbl.m_lock);
+	return rc;
+
+bitmap_fail:
+	cam_mem_util_client_destroy();
+client_fail:
+	return rc;
+}
+
+static int cam_mem_mgr_cleanup_table(void)
+{
+	int i;
+
+	mutex_lock(&tbl.m_lock);
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		if (!tbl.bufq[i].active) {
+			CDBG("Buffer inactive at idx=%d, continuing\n", i);
+			continue;
+		} else {
+			pr_err("Active buffer at idx=%d, possible leak\n", i);
+		}
+
+		mutex_lock(&tbl.bufq[i].q_lock);
+		ion_free(tbl.client, tbl.bufq[i].i_hdl);
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].flags = 0;
+		tbl.bufq[i].buf_handle = -1;
+		tbl.bufq[i].vaddr = 0;
+		tbl.bufq[i].len = 0;
+		memset(tbl.bufq[i].hdls, 0,
+			sizeof(int32_t) * tbl.bufq[i].num_hdl);
+		tbl.bufq[i].num_hdl = 0;
+		tbl.bufq[i].i_hdl = NULL;
+		tbl.bufq[i].active = false;
+		mutex_unlock(&tbl.bufq[i].q_lock);
+		mutex_destroy(&tbl.bufq[i].q_lock);
+	}
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return 0;
+}
+
+void cam_mem_mgr_deinit(void)
+{
+	cam_mem_mgr_cleanup_table();
+	mutex_lock(&tbl.m_lock);
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	kfree(tbl.bitmap);
+	tbl.bitmap = NULL;
+	cam_mem_util_client_destroy();
+	mutex_unlock(&tbl.m_lock);
+	mutex_destroy(&tbl.m_lock);
+}
+
+static int32_t cam_mem_get_slot(void)
+{
+	int32_t idx;
+
+	mutex_lock(&tbl.m_lock);
+	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		mutex_unlock(&tbl.m_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(idx, tbl.bitmap);
+	tbl.bufq[idx].active = true;
+	mutex_init(&tbl.bufq[idx].q_lock);
+	mutex_unlock(&tbl.m_lock);
+
+	return idx;
+}
+
+static void cam_mem_put_slot(int32_t idx)
+{
+	mutex_lock(&tbl.m_lock);
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+}
+
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr)
+{
+	int rc = 0, idx;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto handle_mismatch;
+	}
+
+	rc = cam_smmu_get_iova(mmu_handle,
+		tbl.bufq[idx].fd,
+		iova_ptr,
+		len_ptr);
+	if (rc < 0)
+		pr_err("fail to get buf hdl :%d", buf_handle);
+
+handle_mismatch:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_io_buf);
+
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
+{
+	int rc = 0;
+	int idx;
+	struct ion_handle *ion_hdl = NULL;
+	uint64_t kvaddr = 0;
+	size_t klen = 0;
+
+	if (!buf_handle || !vaddr_ptr || !len)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EPERM;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	ion_hdl = tbl.bufq[idx].i_hdl;
+	if (!ion_hdl) {
+		pr_err("Invalid ION handle\n");
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		if (!tbl.bufq[idx].kmdvaddr) {
+			rc = cam_mem_util_map_cpu_va(ion_hdl,
+				&kvaddr, &klen);
+			if (rc)
+				goto exit_func;
+			tbl.bufq[idx].kmdvaddr = kvaddr;
+		}
+	} else {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
+	*len = tbl.bufq[idx].len;
+
+exit_func:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_cpu_buf);
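Handles returned by the memory manager embed the bufq slot index, which cam_mem_get_cpu_buf()/cam_mem_get_io_buf() recover via CAM_MEM_MGR_GET_HDL_IDX() and re-validate against the stored buf_handle. A minimal sketch of a kernel-side user resolving a CPU mapping (illustrative only, not part of this patch; the buffer must have been allocated with CAM_MEM_FLAG_KMD_ACCESS):

/* Illustrative only -- not part of this patch. */
static int example_read_first_word(int32_t buf_handle, uint32_t *out)
{
	uint64_t kva = 0;
	size_t len = 0;
	int rc;

	rc = cam_mem_get_cpu_buf(buf_handle, &kva, &len);
	if (rc)
		return rc;
	if (len < sizeof(uint32_t))
		return -EINVAL;

	*out = *(uint32_t *)(uintptr_t)kva;
	return 0;
}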
+
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
+{
+	int rc = 0, idx;
+	uint32_t ion_cache_ops;
+	unsigned long ion_flag = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+
+	if (!tbl.bufq[idx].active) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
+		&ion_flag);
+	if (rc) {
+		pr_err("cache get flags failed %d\n", rc);
+		goto fail;
+	}
+
+	if (ION_IS_CACHED(ion_flag)) {
+		switch (cmd->mem_cache_ops) {
+		case CAM_MEM_CLEAN_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_CACHES;
+			break;
+		case CAM_MEM_INV_CACHE:
+			ion_cache_ops = ION_IOC_INV_CACHES;
+			break;
+		case CAM_MEM_CLEAN_INV_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+			break;
+		default:
+			pr_err("invalid cache ops :%d", cmd->mem_cache_ops);
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		rc = msm_ion_do_cache_op(tbl.client,
+				tbl.bufq[idx].i_hdl,
+				(void *)tbl.bufq[idx].vaddr,
+				tbl.bufq[idx].len,
+				ion_cache_ops);
+		if (rc)
+			pr_err("cache operation failed %d\n", rc);
+	}
+fail:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
+
+static int cam_mem_util_get_ion_buffer(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	int rc = 0;
+
+	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*hdl))
+		return -ENOMEM;
+
+	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
+	if (*fd < 0) {
+		pr_err("dma buf get fd fail");
+		rc = -EINVAL;
+		goto get_fd_fail;
+	}
+
+	return rc;
+
+get_fd_fail:
+	ion_free(tbl.client, *hdl);
+	return rc;
+}
+
+static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	uint32_t heap_id;
+	uint32_t ion_flag = 0;
+	int rc;
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+	else
+		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+	if (cmd->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	rc = cam_mem_util_get_ion_buffer(cmd->len,
+		cmd->align,
+		heap_id,
+		ion_flag,
+		hdl,
+		fd);
+
+	return rc;
+}
+
+
+static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	if (!cmd->flags) {
+		pr_err("Invalid flags\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		pr_err("Kernel mapping in secure mode not allowed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
+{
+	if (!cmd->flags) {
+		pr_err("Invalid flags\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		pr_err("Kernel mapping in secure mode not allowed");
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		pr_err("Shared memory buffers are not allowed to be mapped\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_map_hw_va(uint32_t flags,
+	int32_t *mmu_hdls,
+	int32_t num_hdls,
+	int fd,
+	dma_addr_t *hw_vaddr,
+	size_t *len,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	int rc = -1;
+	int dir = cam_mem_util_get_dma_dir(flags);
+
+	if (dir < 0) {
+		pr_err("fail to map DMA direction\n");
+		return dir;
+	}
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_sec_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len);
+
+			if (rc < 0) {
+				pr_err("Failed to securely map to smmu");
+				goto multi_map_fail;
+			}
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len,
+				region);
+
+			if (rc < 0) {
+				pr_err("Failed to map to smmu");
+				goto multi_map_fail;
+			}
+		}
+	}
+
+	return rc;
+multi_map_fail:
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+	else
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_iova(mmu_hdls[i],
+				fd,
+				CAM_SMMU_REGION_IO);
+	return rc;
+
+}
+
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	int rc;
+	int32_t idx;
+	struct ion_handle *ion_hdl;
+	int ion_fd;
+	dma_addr_t hw_vaddr = 0;
+	size_t len;
+
+	if (!cmd) {
+		pr_err(" Invalid argument\n");
+		return -EINVAL;
+	}
+	len = cmd->len;
+
+	rc = cam_mem_util_check_flags(cmd);
+	if (rc) {
+		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		return rc;
+	}
+
+	rc = cam_mem_util_ion_alloc(cmd,
+		&ion_hdl,
+		&ion_fd);
+	if (rc) {
+		pr_err("Ion allocation failed\n");
+		return rc;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+		cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+
+		enum cam_smmu_region_id region;
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			ion_fd,
+			&hw_vaddr,
+			&len,
+			region);
+		if (rc)
+			goto map_hw_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = cmd->len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.fd = tbl.bufq[idx].fd;
+	cmd->out.vaddr = 0;
+
+	CDBG("buf handle: %x, fd: %d, len: %zu\n",
+		cmd->out.buf_handle, cmd->out.fd,
+		tbl.bufq[idx].len);
+
+	return rc;
+
+map_hw_fail:
+	cam_mem_put_slot(idx);
+slot_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
+{
+	int32_t idx;
+	int rc;
+	struct ion_handle *ion_hdl;
+	dma_addr_t hw_vaddr = 0;
+	size_t len = 0;
+
+	if (!cmd || (cmd->fd < 0)) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
+		return -EINVAL;
+
+	rc = cam_mem_util_check_map_flags(cmd);
+	if (rc) {
+		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		return rc;
+	}
+
+	ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
+	if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
+		pr_err("Failed to import ion fd\n");
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) {
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			cmd->fd,
+			&hw_vaddr,
+			&len,
+			CAM_SMMU_REGION_IO);
+		if (rc)
+			goto map_fail;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto map_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = true;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.vaddr = 0;
+
+	return rc;
+
+map_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+static int cam_mem_util_unmap_hw_va(int32_t idx,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	uint32_t flags;
+	int32_t *mmu_hdls;
+	int num_hdls;
+	int fd;
+	int rc = -EINVAL;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index\n");
+		return rc;
+	}
+
+	flags = tbl.bufq[idx].flags;
+	mmu_hdls = tbl.bufq[idx].hdls;
+	num_hdls = tbl.bufq[idx].num_hdl;
+	fd = tbl.bufq[idx].fd;
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			if (rc < 0)
+				goto unmap_end;
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_iova(mmu_hdls[i],
+				fd,
+				region);
+			if (rc < 0)
+				goto unmap_end;
+		}
+	}
+
+unmap_end:
+	return rc;
+}
+
+static int cam_mem_util_unmap(int32_t idx)
+{
+	int rc = 0;
+	enum cam_smmu_region_id region;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index\n");
+		return -EINVAL;
+	}
+
+	CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
+		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
+			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		region = CAM_SMMU_REGION_IO;
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+		region = CAM_SMMU_REGION_SHARED;
+
+	rc = cam_mem_util_unmap_hw_va(idx,
+		region);
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].flags = 0;
+	tbl.bufq[idx].buf_handle = -1;
+	tbl.bufq[idx].vaddr = 0;
+	memset(tbl.bufq[idx].hdls, 0,
+		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
+
+	CDBG("Ion handle at idx = %d freeing = %pK, fd = %d\n",
+		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd);
+
+	if (tbl.bufq[idx].i_hdl && !tbl.bufq[idx].is_imported) {
+		CDBG("Freeing up non-imported buffer at fd = %d, hdl = %pK",
+			tbl.bufq[idx].fd,
+			tbl.bufq[idx].i_hdl);
+		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
+		tbl.bufq[idx].i_hdl = NULL;
+	} else {
+		CDBG("Not freeing up imported buffer at fd = %d",
+			tbl.bufq[idx].fd);
+	}
+
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].is_imported = false;
+	tbl.bufq[idx].i_hdl = NULL;
+	tbl.bufq[idx].len = 0;
+	tbl.bufq[idx].num_hdl = 0;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	cam_mem_put_slot(idx);
+
+	return rc;
+}
+
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
+{
+	int idx;
+	int rc;
+
+	if (!cmd) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index extracted from mem handle\n");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		pr_err("Released buffer state should be active\n");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
+		pr_err("Released buf handle not matching within table\n");
+		return -EINVAL;
+	}
+
+	CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+	rc = cam_mem_util_unmap(idx);
+
+	return rc;
+}
+
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct ion_handle *hdl;
+	int ion_fd;
+	int rc = 0;
+	uint32_t heap_id;
+	int32_t ion_flag = 0;
+	uint64_t kvaddr;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	int32_t idx;
+	uint32_t mem_handle;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+	enum cam_smmu_region_id region;
+
+	if (!inp || !out) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (inp->region != CAM_MEM_MGR_REGION_SHARED &&
+		inp->region != CAM_MEM_MGR_REGION_NON_SECURE_IO) {
+		pr_err("Invalid flags for request mem\n");
+		return -EINVAL;
+	}
+
+	if (inp->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+	rc = cam_mem_util_get_ion_buffer(inp->size,
+		inp->align,
+		heap_id,
+		ion_flag,
+		&hdl,
+		&ion_fd);
+
+	if (rc) {
+		pr_err("ION alloc failed for shared buffer\n");
+		goto ion_fail;
+	} else {
+		CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+	}
+
+	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
+	if (rc) {
+		pr_err("Failed to get kernel vaddr\n");
+		goto map_fail;
+	}
+
+	if (!inp->smmu_hdl) {
+		pr_err("Invalid SMMU handle\n");
+		rc = -EINVAL;
+		goto smmu_fail;
+	}
+
+	if (inp->region == CAM_MEM_MGR_REGION_SHARED)
+		region = CAM_SMMU_REGION_SHARED;
+
+	if (inp->region == CAM_MEM_MGR_REGION_NON_SECURE_IO)
+		region = CAM_SMMU_REGION_IO;
+
+	rc = cam_smmu_map_iova(inp->smmu_hdl,
+		ion_fd,
+		CAM_SMMU_MAP_RW,
+		&iova,
+		&request_len,
+		region);
+
+	if (rc < 0) {
+		pr_err("SMMU mapping failed\n");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = kvaddr;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].i_hdl = hdl;
+	tbl.bufq[idx].len = inp->size;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = kvaddr;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = inp->size;
+	out->region = inp->region;
+
+	return rc;
+slot_fail:
+	cam_smmu_unmap_iova(inp->smmu_hdl,
+		ion_fd,
+		inp->region);
+smmu_fail:
+	ion_unmap_kernel(tbl.client, hdl);
+map_fail:
+	ion_free(tbl.client, hdl);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_request_mem);
+
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+
+	if (!inp) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index extracted from mem handle\n");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		pr_err("Released buffer state should be active\n");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		pr_err("Released buf handle not matching within table\n");
+		return -EINVAL;
+	}
+
+	CDBG("Releasing hdl = %X\n", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_release_mem);
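Kernel-internal users pair cam_mem_mgr_request_mem() with cam_mem_mgr_release_mem() for shared/command buffers; a minimal sketch, assuming a previously acquired SMMU handle (illustrative only, not part of this patch):

/* Illustrative only -- not part of this patch. */
static int example_alloc_shared_buf(int32_t smmu_hdl,
	struct cam_mem_mgr_memory_desc *out)
{
	struct cam_mem_mgr_request_desc req = {
		.size     = 4096,
		.align    = 4096,
		.smmu_hdl = smmu_hdl,
		.flags    = CAM_MEM_FLAG_CACHE,
		.region   = CAM_MEM_MGR_REGION_SHARED,
	};

	/* on success out->kva / out->iova / out->mem_handle are valid */
	return cam_mem_mgr_request_mem(&req, out);
}

/* later, when done with the buffer: cam_mem_mgr_release_mem(&desc); */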
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
new file mode 100644
index 0000000..c5f839b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_H_
+#define _CAM_MEM_MGR_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_mem_mgr_api.h"
+
+#define CAM_MEM_BUFQ_MAX 1024
+
+/**
+ * struct cam_mem_buf_queue
+ *
+ * @i_hdl:       ion handle for the buffer
+ * @q_lock:      mutex lock for buffer
+ * @hdls:        list of mapped handles
+ * @num_hdl:     number of handles
+ * @fd:          file descriptor of buffer
+ * @buf_handle:  unique handle for buffer
+ * @align:       alignment for allocation
+ * @len:         size of buffer
+ * @flags:       attributes of buffer
+ * @vaddr:       IOVA of buffer
+ * @kmdvaddr:    Kernel virtual address
+ * @active:      state of the buffer
+ * @is_imported: Flag indicating if buffer is imported from an FD in user space
+ */
+struct cam_mem_buf_queue {
+	struct ion_handle *i_hdl;
+	struct mutex q_lock;
+	int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
+	int32_t num_hdl;
+	int32_t fd;
+	int32_t buf_handle;
+	int32_t align;
+	size_t len;
+	uint32_t flags;
+	uint64_t vaddr;
+	uint64_t kmdvaddr;
+	bool active;
+	bool is_imported;
+};
+
+/**
+ * struct cam_mem_table
+ *
+ * @m_lock: mutex lock for table
+ * @bitmap: bitmap of the mem mgr utility
+ * @bits: max bits of the utility
+ * @client: ion client pointer
+ * @bufq: array of buffers
+ */
+struct cam_mem_table {
+	struct mutex m_lock;
+	void *bitmap;
+	size_t bits;
+	struct ion_client *client;
+	struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+};
+
+/**
+ * @brief: Allocates and maps buffer
+ *
+ * @cmd:   Allocation information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd);
+
+/**
+ * @brief: Releases a buffer reference
+ *
+ * @cmd:   Buffer release information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd);
+
+/**
+ * @brief Maps a buffer
+ *
+ * @cmd: Buffer mapping information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd);
+
+/**
+ * @brief: Perform cache ops on the buffer
+ *
+ * @cmd:   Cache ops information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd);
+
+/**
+ * @brief: Initializes the memory manager
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_init(void);
+
+/**
+ * @brief:  Tears down the memory manager
+ *
+ * @return None
+ */
+void cam_mem_mgr_deinit(void);
+
+#endif /* _CAM_MEM_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
new file mode 100644
index 0000000..32a754e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_API_H_
+#define _CAM_MEM_MGR_API_H_
+
+#include <media/cam_req_mgr.h>
+
+/* Region IDs for memory manager */
+#define CAM_MEM_MGR_REGION_FIRMWARE      0
+#define CAM_MEM_MGR_REGION_SHARED        1
+#define CAM_MEM_MGR_REGION_NON_SECURE_IO 2
+#define CAM_MEM_MGR_REGION_SECURE_IO     3
+#define CAM_MEM_MGR_REGION_SCRATCH       4
+
+/**
+ * struct cam_mem_mgr_request_desc
+ *
+ * @size    : Size of memory requested for allocation
+ * @align   : Alignment of requested memory
+ * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped
+ * @flags   : Flags to indicate cached/uncached property
+ * @region  : Region where memory should be allocated
+ */
+struct cam_mem_mgr_request_desc {
+	uint64_t size;
+	uint64_t align;
+	int32_t smmu_hdl;
+	uint32_t flags;
+	uint32_t region;
+};
+
+/**
+ * struct cam_mem_mgr_memory_desc
+ *
+ * @kva        : Kernel virtual address of allocated memory
+ * @iova       : IOVA of allocated memory
+ * @smmu_hdl   : SMMU handle of allocated memory
+ * @mem_handle : Mem handle identifying allocated memory
+ * @len        : Length of allocated memory
+ * @region     : Region to which allocated memory belongs
+ */
+struct cam_mem_mgr_memory_desc {
+	uint64_t kva;
+	uint32_t iova;
+	int32_t smmu_hdl;
+	uint32_t mem_handle;
+	uint64_t len;
+	uint32_t region;
+};
+
+/**
+ * @brief: Requests a memory buffer
+ *
+ * @inp:   Information specifying requested buffer properties
+ * @out:   Information about allocated buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Releases a memory buffer
+ *
+ * @inp:   Information specifying buffer to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
+
+/**
+ * @brief: Returns IOVA information about buffer
+ *
+ * @buf_handle: Handle of the buffer
+ * @mmu_handle: SMMU handle where buffer is mapped
+ * @iova_ptr  : Pointer to mmu's iova
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr);
+/**
+ * @brief: Returns CPU address information about buffer
+ *
+ * @buf_handle: Handle for the buffer
+ * @vaddr_ptr : pointer to kernel virtual address
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
+	size_t *len);
+
+#endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index a34703c..e62c101 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -18,588 +18,1960 @@
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
 #include "cam_req_mgr_workq.h"
-
-/* Forward declarations */
-static int cam_req_mgr_cb_notify_sof(
-	struct cam_req_mgr_sof_notify *sof_data);
-
+#include "cam_req_mgr_debug.h"
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 
-static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
-	.notify_sof = cam_req_mgr_cb_notify_sof,
-	.notify_err = NULL,
-	.add_req = NULL,
-};
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                  i = 0;
+	int                      rc = 0;
+	struct crm_task_payload *task_data = NULL;
+
+	task_data = kcalloc(
+		workq->task.num_task, sizeof(*task_data),
+		GFP_KERNEL);
+	if (!task_data) {
+		rc = -ENOMEM;
+	} else {
+		for (i = 0; i < workq->task.num_task; i++)
+			workq->task.pool[i].payload = &task_data[i];
+	}
+
+	return rc;
+}
 
 /**
- * cam_req_mgr_pvt_find_link()
+ * __cam_req_mgr_print_req_tbl()
  *
- * @brief: Finds link matching with handle within session
- * @session: session indetifier
- * @link_hdl: link handle
+ * @brief : Print req table and input queue data for debugging
+ * @req   : request data pointer whose input queue and pd tables are printed
  *
- * Returns pointer to link matching handle
+ * @return: 0 for success, negative for failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_find_link(
-	struct cam_req_mgr_core_session *session, int32_t link_hdl)
+static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req)
 {
-	int32_t i;
+	int                           rc = 0;
+	int32_t                       i = 0;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+	struct cam_req_mgr_req_tbl   *req_tbl = req->l_tbl;
+
+	if (!in_q || !req_tbl) {
+		CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+		return -EINVAL;
+	}
+	CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+	mutex_lock(&req->lock);
+	for (i = 0; i < in_q->num_slots; i++) {
+		CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+			in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
+	}
+
+	while (req_tbl != NULL) {
+		for (i = 0; i < req_tbl->num_slots; i++) {
+			CRM_DBG("idx= %d, map= %x, state= %d",
+				req_tbl->slot[i].idx,
+				req_tbl->slot[i].req_ready_map,
+				req_tbl->slot[i].state);
+		}
+		CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+			req_tbl->id, req_tbl->pd, req_tbl->dev_count,
+			req_tbl->dev_mask, req_tbl->skip_traverse,
+			req_tbl->num_slots);
+		req_tbl = req_tbl->next;
+	}
+	mutex_unlock(&req->lock);
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief    : Find pipeline delay based table pointer which matches delay
+ * @tbl      : Pointer to list of request table
+ * @delay    : Pipeline delay value to be searched for comparison
+ *
+ * @return   : pointer to request table for matching pipeline delay table.
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+	struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+	if (!tbl)
+		return NULL;
+
+	do {
+		if (delay != tbl->pd)
+			tbl = tbl->next;
+		else
+			return tbl;
+	} while (tbl != NULL);
+
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief    : Increment val passed by step size and rollover after max_val
+ * @val      : value to be incremented
+ * @step     : amount/step by which val is incremented
+ * @max_val  : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief    : Decrement val passed by step size and rollover after max_val
+ * @val      : value to be decremented
+ * @step     : amount/step by which val is decremented
+ * @max_val  : when the value goes below zero it rolls over relative to max_val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = *val - step;
+	if (*val < 0)
+		*val = max_val + (*val);
+}
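Both index helpers treat the input queue as a ring of num_slots entries; a quick illustration (not part of this patch):

/* Illustrative only: ring behaviour of the helpers above (num_slots = 8). */
static void example_idx_wrap(void)
{
	int32_t idx = 7;

	__cam_req_mgr_inc_idx(&idx, 1, 8);   /* 7 -> 0, wraps at num_slots */
	idx = 1;
	__cam_req_mgr_dec_idx(&idx, 2, 8);   /* 1 -> 7, wraps below zero   */
}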
+
+/**
+ * __cam_req_mgr_traverse()
+ *
+ * @brief    : Traverse through pd tables; it internally covers all linked
+ *             pd tables. Each pd table visited checks whether the idx passed
+ *             to it is in ready state. Ready means all devices linked to the
+ *             pd table have the packet for this request id ready. It then
+ *             calls the subsequent pd tbl with a new idx; the new idx value
+ *             takes into account the delta between the current pd table and
+ *             the next one.
+ * @traverse_data: contains all the info to traverse through pd tables
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
+{
+	int                          rc = 0;
+	int32_t                      next_idx = traverse_data->idx;
+	int32_t                      curr_idx = traverse_data->idx;
+	struct cam_req_mgr_req_tbl  *tbl;
+	struct cam_req_mgr_apply    *apply_data;
+
+	if (!traverse_data->tbl || !traverse_data->apply_data) {
+		CRM_ERR("NULL pointer %pK %pK",
+			traverse_data->tbl, traverse_data->apply_data);
+		traverse_data->result = 0;
+		return -EINVAL;
+	}
+
+	tbl = traverse_data->tbl;
+	apply_data = traverse_data->apply_data;
+	CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
+		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
+
+	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
+	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
+		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
+		tbl->skip_traverse > 0) {
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		if (rc >= 0) {
+			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+			apply_data[tbl->pd].pd = tbl->pd;
+			apply_data[tbl->pd].req_id =
+				CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
+			apply_data[tbl->pd].idx = curr_idx;
+
+			/* If traverse is successful decrement traverse skip */
+			if (tbl->skip_traverse > 0) {
+				apply_data[tbl->pd].req_id = -1;
+				tbl->skip_traverse--;
+			}
+		} else {
+			/* linked pd table is not ready for this traverse yet */
+			return rc;
+		}
+	} else {
+		/* This pd table is not ready to proceed with asked idx */
+		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+		return -EAGAIN;
+	}
+	return 0;
+}
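As a worked example (illustrative only): with two pd tables, pd 2 followed by pd 0 (so pd_delta = 2 on the first), a traverse starting at idx 5 checks the pd-2 table at slot 5 and, if that slot is READY (or the slot/table is in skip mode), recurses into the pd-0 table at slot 5 - 2 = 3. On success, apply_data[2] carries the request id sitting in input-queue slot 5 and apply_data[0] the one in slot 3, which is how the higher-delay device ends up being programmed that many requests ahead of the lower-delay one.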
+
+/**
+ * __cam_req_mgr_in_q_skip_idx()
+ *
+ * @brief    : Decrement val passed by step size and rollover after max_val
+ * @in_q     : input queue pointer
+ * @idx      : Sets skip_idx bit of the particular slot to true so when traverse
+ *             happens for this idx, no req will be submitted for devices
+ *             handling this idx.
+ *
+ */
+static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
+	int32_t idx)
+{
+	in_q->slot[idx].req_id = -1;
+	in_q->slot[idx].skip_idx = 1;
+	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+	CRM_DBG("SET IDX SKIP on slot= %d", idx);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_id()
+ *
+ * @brief    : Set unique id to table
+ * @tbl      : pipeline based table which requires new id
+ * @req      : pointer to request data which contains num_tables counter
+ *
+ */
+static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
+	struct cam_req_mgr_req_data *req)
+{
+	if (!tbl)
+		return;
+	do {
+		tbl->id = req->num_tbl++;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_all_skip_cnt()
+ *
+ * @brief    : Each pd table sets its skip value based on the delta between
+ *             itself and the max pd value. This is used during initial
+ *             streamon or in the bubble case, so that each pd table skips the
+ *             required number of traversals and aligns itself with the devs
+ *             connected to req mgr.
+ * @l_tbl    : iterates through list of pd tables and sets skip traverse
+ *
+ */
+static void __cam_req_mgr_tbl_set_all_skip_cnt(
+	struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl = *l_tbl;
+	int32_t                     max_pd;
+
+	if (!tbl)
+		return;
+
+	max_pd = tbl->pd;
+	do {
+		tbl->skip_traverse = max_pd - tbl->pd;
+		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
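+/*
+ * Worked example (pd values assumed for illustration): for tables with
+ * pd 2, 1 and 0, skip_traverse becomes 0, 1 and 2 respectively, so the
+ * lower delay tables sit out the first traversals until all devices are
+ * aligned on the same request.
+ */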
+
+/**
+ * __cam_req_mgr_reset_req_slot()
+ *
+ * @brief    : reset specified idx/slot in input queue as well as all pd tables
+ * @link     : link pointer
+ * @idx      : slot index which will be reset
+ *
+ */
+static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	struct cam_req_mgr_slot      *slot;
+	struct cam_req_mgr_req_tbl   *tbl = link->req.l_tbl;
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	slot = &in_q->slot[idx];
+	CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if CSL has already pushed a new request */
+	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
+		return;
+
+	/* Reset input queue slot */
+	slot->req_id = -1;
+	slot->skip_idx = 0;
+	slot->recover = 0;
+	slot->status = CRM_SLOT_STATUS_NO_REQ;
+
+	/* Reset all pd table slot */
+	while (tbl != NULL) {
+		CRM_DBG("pd: %d: idx %d state %d",
+			tbl->pd, idx, tbl->slot[idx].state);
+		tbl->slot[idx].req_ready_map = 0;
+		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
+		tbl = tbl->next;
+	}
+}
+
+/**
+ * __cam_req_mgr_check_next_req_slot()
+ *
+ * @brief    : While streaming, if the input queue does not contain any pending
+ *             request, req mgr still needs to submit pending request ids to
+ *             devices with lower pipeline delay value.
+ * @in_q     : Pointer to the input queue which req mgr will look into
+ *
+ */
+static void __cam_req_mgr_check_next_req_slot(
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int32_t                  idx = in_q->rd_idx;
+	struct cam_req_mgr_slot *slot;
+
+	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
+	slot = &in_q->slot[idx];
+
+	CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if there is new req from CSL, if not complete req */
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		if (in_q->wr_idx != idx)
+			CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+		__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	}
+}
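+/*
+ * Worked example (indices assumed for illustration): if rd_idx is 3 and
+ * slot 4 holds no request from CSL, slot 4 is marked skip_idx and wr_idx
+ * moves past it, so devices with lower pipeline delay can still be applied
+ * on the next traverse.
+ */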
+
+/**
+ * __cam_req_mgr_send_req()
+ *
+ * @brief    : send request id to be applied to each device connected on link
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @in_q     : pointer to input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int                                  rc = 0, pd, i, idx;
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_apply_request     apply_req;
+	struct cam_req_mgr_link_evt_data     evt_data;
+
+	apply_req.link_hdl = link->link_hdl;
+	apply_req.report_if_bubble = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev) {
+			pd = dev->dev_info.p_delay;
+			if (pd >= CAM_PIPELINE_DELAY_MAX) {
+				CRM_WARN("pd %d greater than max",
+					pd);
+				continue;
+			}
+			if (link->req.apply_data[pd].skip_idx ||
+				link->req.apply_data[pd].req_id < 0) {
+				CRM_DBG("skip %d req_id %lld",
+					link->req.apply_data[pd].skip_idx,
+					link->req.apply_data[pd].req_id);
+				continue;
+			}
+			apply_req.dev_hdl = dev->dev_hdl;
+			apply_req.request_id =
+				link->req.apply_data[pd].req_id;
+			idx = link->req.apply_data[pd].idx;
+			apply_req.report_if_bubble =
+				in_q->slot[idx].recover;
+			CRM_DBG("SEND: pd %d req_id %lld",
+				pd, apply_req.request_id);
+			if (dev->ops && dev->ops->apply_req) {
+				rc = dev->ops->apply_req(&apply_req);
+				if (rc < 0)
+					break;
+			}
+		}
+	}
+	if (rc < 0) {
+		CRM_ERR("APPLY FAILED pd %d req_id %lld",
+			dev->dev_info.p_delay, apply_req.request_id);
+		/* Apply req failed notify already applied devs */
+		for (; i >= 0; i--) {
+			dev = &link->l_dev[i];
+			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
+			evt_data.link_hdl =  link->link_hdl;
+			evt_data.req_id = apply_req.request_id;
+			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
+			if (dev->ops && dev->ops->process_evt)
+				dev->ops->process_evt(&evt_data);
+		}
+	}
+	return rc;
+}
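+/*
+ * Note on the failure path above: if apply fails for the device at index
+ * i, the loop walks back from i to 0 and sends CRM_KMD_ERR_BUBBLE via
+ * process_evt to each device that may already have applied this request.
+ */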
+
+/**
+ * __cam_req_mgr_check_link_is_ready()
+ *
+ * @brief    : traverse through all request tables and see if all devices are
+ *             ready to apply request settings.
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @idx      : index within input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	int                            rc;
+	struct cam_req_mgr_traverse    traverse_data;
+	struct cam_req_mgr_req_queue  *in_q;
+	struct cam_req_mgr_apply      *apply_data;
+
+	in_q = link->req.in_q;
+
+	apply_data = link->req.apply_data;
+	memset(apply_data, 0,
+		sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+
+	traverse_data.apply_data = apply_data;
+	traverse_data.idx = idx;
+	traverse_data.tbl = link->req.l_tbl;
+	traverse_data.in_q = in_q;
+	traverse_data.result = 0;
+	/*
+	 *  Traverse through all pd tables, if result is success,
+	 *  apply the settings
+	 */
+
+	rc = __cam_req_mgr_traverse(&traverse_data);
+	CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+		idx, traverse_data.result, link->pd_mask, rc);
+
+	if (!rc && traverse_data.result == link->pd_mask) {
+		CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+			link->link_hdl, idx,
+			apply_data[2].req_id, apply_data[1].req_id,
+			apply_data[0].req_id);
+	} else
+		rc = -EAGAIN;
+
+	return rc;
+}
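+/*
+ * Worked example (mask layout assumed for illustration): with devices at
+ * pd 1 and pd 2 the link's pd_mask is (1 << 1) | (1 << 2) = 0x6; only a
+ * traverse result with both success bits set matches pd_mask and lets the
+ * apply proceed.
+ */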
+
+/**
+ * __cam_req_mgr_process_req()
+ *
+ * @brief    : processes read index in request queue and traverse through table
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link)
+{
+	int                                  rc = 0, idx;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_req_queue        *in_q;
+	struct cam_req_mgr_core_session     *session;
+
+	in_q = link->req.in_q;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	/*
+	 * 1. Check if new read index,
+	 * - if in pending state, traverse again to complete
+	 *    transaction of this read index.
+	 * - if in applied state, something is wrong.
+	 * - if in no_req state, no new req
+	 */
+	CRM_DBG("idx %d req_status %d",
+		in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+
+	slot = &in_q->slot[in_q->rd_idx];
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		CRM_DBG("No Pending req");
+		return 0;
+	}
+
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
+	if (rc >= 0) {
+		rc = __cam_req_mgr_send_req(link, link->req.in_q);
+		if (rc < 0) {
+			/* Apply req failed; retry at next SOF */
+			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		} else {
+			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+
+			if (link->state == CAM_CRM_LINK_STATE_ERR) {
+				CRM_WARN("Err recovery done idx %d status %d",
+					in_q->rd_idx,
+					in_q->slot[in_q->rd_idx].status);
+				mutex_lock(&link->lock);
+				link->state = CAM_CRM_LINK_STATE_READY;
+				mutex_unlock(&link->lock);
+			}
+
+			/*
+			 * 2. Check if any new req is pending in input queue,
+			 *    if not finish the lower pipeline delay device with
+			 *    available req ids.
+			 */
+			__cam_req_mgr_check_next_req_slot(in_q);
+
+			/*
+			 * 3. Older req slots can be safely reset as no err ack.
+			 */
+			idx = in_q->rd_idx;
+			__cam_req_mgr_dec_idx(&idx, link->max_delay + 1,
+				in_q->num_slots);
+			__cam_req_mgr_reset_req_slot(link, idx);
+		}
+	} else {
+		/*
+		 * 4. If traverse result is not success, then some devices are
+		 *    not ready with the packet for the asked request id,
+		 *    hence try again at next SOF
+		 */
+		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+		if (link->state == CAM_CRM_LINK_STATE_ERR) {
+			/*
+			 * During error recovery all tables should be ready
+			 *   don't expect to enter here.
+			 * @TODO: gracefully handle if recovery fails.
+			 */
+			CRM_ERR("FATAL recovery can't finish idx %d status %d",
+				in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+			rc = -EPERM;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_add_tbl_to_link()
+ *
+ * @brief    : Add table to the list under link, sorted in decreasing pd order
+ * @l_tbl    : list of pipeline delay tables.
+ * @new_tbl  : new tbl which will be inserted into the above list as per its
+ *             pd value
+ *
+ */
+static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
+	struct cam_req_mgr_req_tbl *new_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl;
+
+	if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
+		new_tbl->next = *l_tbl;
+		if (*l_tbl) {
+			new_tbl->pd_delta =
+				new_tbl->pd - (*l_tbl)->pd;
+		}
+		*l_tbl = new_tbl;
+	} else {
+		tbl = *l_tbl;
+
+		/* Reach the existing tbl which has a smaller pd value */
+		while (tbl->next != NULL &&
+			new_tbl->pd < tbl->next->pd) {
+			tbl = tbl->next;
+		}
+		if (tbl->next != NULL) {
+			new_tbl->pd_delta =
+				new_tbl->pd - tbl->next->pd;
+		} else {
+			/* This is the last table in the linked list */
+			new_tbl->pd_delta = 0;
+		}
+		new_tbl->next = tbl->next;
+		tbl->next = new_tbl;
+		tbl->pd_delta = tbl->pd - new_tbl->pd;
+	}
+	CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+		new_tbl->pd_delta);
+}
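+/*
+ * Worked example (pd values assumed for illustration): inserting a pd 1
+ * table into a list that only holds a pd 2 table links them as
+ * pd 2 -> pd 1 and records pd_delta = 1 on the pd 2 entry, the idx offset
+ * used later during traverse.
+ */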
+
+/**
+ * __cam_req_mgr_create_pd_tbl()
+ *
+ * @brief    : Creates new request table for new delay value
+ * @delay    : New pd table allocated will have this delay value
+ *
+ * @return   : pointer to newly allocated table, NULL for failure
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
+{
+	struct cam_req_mgr_req_tbl *tbl =
+		kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
+	if (tbl != NULL) {
+		tbl->num_slots = MAX_REQ_SLOTS;
+		CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+	}
+
+	return tbl;
+}
+
+/**
+ * __cam_req_mgr_destroy_all_tbl()
+ *
+ * @brief   : This func will destroy all pipeline delay based req table structs
+ * @l_tbl    : pointer to the first table in the list; it has the max pd
+ *
+ */
+static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl  *tbl = *l_tbl, *temp;
+
+	CRM_DBG("*l_tbl %pK", tbl);
+	while (tbl != NULL) {
+		temp = tbl->next;
+		kfree(tbl);
+		tbl = temp;
+	}
+	*l_tbl = NULL;
+}
+
+/**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief    : Find idx from input queue at which req id is enqueued
+ * @in_q     : input request queue pointer
+ * @req_id   : request id which needs to be searched in input queue
+ *
+ * @return   : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+	int32_t                   idx, i;
+	struct cam_req_mgr_slot  *slot;
+
+	idx = in_q->wr_idx;
+	for (i = 0; i < in_q->num_slots; i++) {
+		slot = &in_q->slot[idx];
+		if (slot->req_id == req_id) {
+			CRM_DBG("req %lld found at %d %d status %d",
+				req_id, idx, slot->idx,
+				slot->status);
+			break;
+		}
+		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+	}
+	if (i >= in_q->num_slots)
+		idx = -1;
+
+	return idx;
+}
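+/*
+ * Note: the search starts at wr_idx (the most recently written slot) and
+ * walks backwards through the circular queue, so the newest matching
+ * request id is found first.
+ */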
+
+/**
+ * __cam_req_mgr_setup_in_q()
+ *
+ * @brief : Initialize input queue slot data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int  __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
+{
+	int                           i;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	in_q->num_slots = MAX_REQ_SLOTS;
+
+	for (i = 0; i < in_q->num_slots; i++) {
+		in_q->slot[i].idx = i;
+		in_q->slot[i].req_id = -1;
+		in_q->slot[i].skip_idx = 0;
+		in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
+	}
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_reset_in_q()
+ *
+ * @brief : Reset input queue data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
+{
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CRM_ERR("NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
+	in_q->num_slots = 0;
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_sof_freeze()
+ *
+ * @brief : Apoptosis - Handles case when connected devices are not responding
+ * @data  : timer pointer
+ *
+ */
+static void __cam_req_mgr_sof_freeze(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
 	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
+	link = (struct cam_req_mgr_core_link *)timer->parent;
+	CRM_ERR("SOF freeze for link %x", link->link_hdl);
+}
+
+/**
+ * __cam_req_mgr_create_subdevs()
+ *
+ * @brief   : Create new crm subdevs to link with realtime devices
+ * @l_dev   : list of subdevs internal to crm
+ * @num_dev : num of subdevs to be created for link
+ *
+ * @return  : 0 on success, negative on allocation failure
+ */
+static int __cam_req_mgr_create_subdevs(
+	struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
+{
+	int rc = 0;
+	*l_dev = (struct cam_req_mgr_connected_device *)
+		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
+		GFP_KERNEL);
+	if (!*l_dev)
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_subdev()
+ *
+ * @brief    : Cleans up the subdevs allocated by crm for link
+ * @l_device : pointer to list of subdevs crm created
+ *
+ */
+static void __cam_req_mgr_destroy_subdev(
+	struct cam_req_mgr_connected_device *l_device)
+{
+	kfree(l_device);
+	l_device = NULL;
+}
+
+/**
+ * __cam_req_mgr_destroy_link_info()
+ *
+ * @brief    : Cleans up the mem allocated while linking
+ * @link     : pointer to link, mem associated with this link is freed
+ *
+ */
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+{
+	int32_t                                 i = 0;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+
+	mutex_lock(&link->lock);
+
+	link_data.link_enable = 0;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = NULL;
+
+	/* Using device ops unlink devices */
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev != NULL) {
+			if (dev->ops && dev->ops->link_setup)
+				dev->ops->link_setup(&link_data);
+			dev->dev_hdl = 0;
+			dev->parent = NULL;
+			dev->ops = NULL;
+		}
+	}
+	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
+	__cam_req_mgr_reset_in_q(&link->req);
+	link->req.num_tbl = 0;
+	mutex_destroy(&link->req.lock);
+
+	link->pd_mask = 0;
+	link->num_devs = 0;
+	link->max_delay = 0;
+
+	mutex_unlock(&link->lock);
+}
+
+/**
+ * __cam_req_mgr_reserve_link()
+ *
+ * @brief: Reserves one link data struct within session
+ * @session: session identifier
+ *
+ * @return: pointer to link reserved
+ *
+ */
+static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
+	struct cam_req_mgr_core_session *session)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_req_queue *in_q;
+
+	if (!session || !g_crm_core_dev) {
+		CRM_ERR("NULL session/core_dev ptr");
 		return NULL;
 	}
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_hdl == link_hdl) {
-			CRM_DBG("Link found p_delay %d",
-				 link->max_pipeline_delay);
-			spin_unlock(&link->lock);
-			break;
-		}
-		spin_unlock(&link->lock);
+	if (session->num_links >= MAX_LINKS_PER_SESSION) {
+		CRM_ERR("Reached max links %d per session limit %d",
+			session->num_links, MAX_LINKS_PER_SESSION);
+		return NULL;
 	}
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-	spin_unlock(&session->lock);
+
+	link = (struct cam_req_mgr_core_link *)
+		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
+	if (!link) {
+		CRM_ERR("failed to create link, no mem");
+		return NULL;
+	}
+	in_q = &session->in_q;
+	mutex_init(&link->lock);
+
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	link->num_devs = 0;
+	link->max_delay = 0;
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
+	link->req.in_q = in_q;
+	in_q->num_slots = 0;
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	link->parent = (void *)session;
+	mutex_unlock(&link->lock);
+
+	mutex_lock(&session->lock);
+	session->links[session->num_links] = link;
+	session->num_links++;
+	CRM_DBG("Active session links (%d)",
+		session->num_links);
+	mutex_unlock(&session->lock);
 
 	return link;
 }
 
 /**
+ * __cam_req_mgr_unreserve_link()
+ *
+ * @brief  : Frees the link data struct and removes it from the session
+ * @session: session identifier
+ * @link   : link identifier
+ *
+ */
+static void __cam_req_mgr_unreserve_link(
+	struct cam_req_mgr_core_session *session,
+	struct cam_req_mgr_core_link **link)
+{
+	int32_t   i = 0;
+
+	if (!session || !*link) {
+		CRM_ERR("NULL session/link ptr %pK %pK",
+			session, *link);
+		return;
+	}
+
+	mutex_lock(&session->lock);
+	if (!session->num_links)
+		CRM_WARN("No active link or invalid state %d",
+			session->num_links);
+	else {
+		for (i = 0; i < session->num_links; i++) {
+			if (session->links[i] == *link)
+				session->links[i] = NULL;
+		}
+		session->num_links--;
+		CRM_DBG("Active session links (%d)",
+			session->num_links);
+	}
+	kfree(*link);
+	*link = NULL;
+	mutex_unlock(&session->lock);
+
+}
+
+/* Workqueue context processing section */
+
+/**
+ * cam_req_mgr_process_send_req()
+ *
+ * @brief: This runs in workqueue thread context. Call core funcs to send
+ *         apply request id to drivers.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_send_req(void *priv, void *data)
+{
+	int                                 rc = 0;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_send_request     *send_req = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	send_req = (struct cam_req_mgr_send_request *)data;
+	in_q = send_req->in_q;
+
+	rc = __cam_req_mgr_send_req(link, in_q);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_flush_req()
+ *
+ * @brief: This runs in workqueue thread context. Call core funcs to check
+ *         which requests need to be removed/cancelled.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_flush_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx = -1;
+	struct cam_req_mgr_flush_info       *flush_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_flush_request     flush_req;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld type %d",
+		flush_info->link_hdl,
+		flush_info->req_id,
+		flush_info->flush_type);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		for (i = 0; i < in_q->num_slots; i++) {
+			slot = &in_q->slot[i];
+			slot->req_id = -1;
+			slot->skip_idx = 1;
+			slot->status = CRM_SLOT_STATUS_NO_REQ;
+		}
+		in_q->wr_idx = 0;
+		in_q->rd_idx = 0;
+	} else if (flush_info->flush_type ==
+		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			flush_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				flush_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
+				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be cancelled",
+					flush_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		flush_req.link_hdl = flush_info->link_hdl;
+		flush_req.dev_hdl = device->dev_hdl;
+		flush_req.req_id = flush_info->req_id;
+		flush_req.type = flush_info->flush_type;
+		/* @TODO: error return handling from drivers */
+		if (device->ops && device->ops->flush_req)
+			rc = device->ops->flush_req(&flush_req);
+	}
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_sched_req()
+ *
+ * @brief: This runs in workqueue thread context. Call core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_sched_req(void *priv, void *data)
+{
+	int                               rc = 0;
+	struct cam_req_mgr_sched_request *sched_req = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_req_queue     *in_q = NULL;
+	struct cam_req_mgr_slot          *slot = NULL;
+	struct crm_task_payload          *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld",
+		sched_req->link_hdl,
+		sched_req->req_id);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	slot = &in_q->slot[in_q->wr_idx];
+
+	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+		CRM_ERR("in_q overwrite %d", slot->status);
+		/* @TODO: error handling */
+	}
+	CRM_DBG("sched_req %lld at slot %d",
+		sched_req->req_id, in_q->wr_idx);
+
+	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
+	slot->req_id = sched_req->req_id;
+	slot->skip_idx = 0;
+	slot->recover = sched_req->bubble_enable;
+	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	mutex_unlock(&link->req.lock);
+
+	complete(&link->workq_comp);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_add_req()
+ *
+ * @brief: This runs in workqueue thread context. Call core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_add_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx;
+	struct cam_req_mgr_add_request      *add_req = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_req_tbl          *tbl = NULL;
+	struct cam_req_mgr_tbl_slot         *slot = NULL;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	add_req = (struct cam_req_mgr_add_request *)&task_data->u;
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		if (device->dev_hdl == add_req->dev_hdl) {
+			tbl = device->pd_tbl;
+			break;
+		}
+	}
+	if (!tbl) {
+		CRM_ERR("dev_hdl not found %x, %x %x",
+			add_req->dev_hdl,
+			link->l_dev[0].dev_hdl,
+			link->l_dev[1].dev_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	/*
+	 * Go through request table and add
+	 * request id to proper table
+	 * 1. find req slot in in_q matching req_id sent by dev
+	 * 2. goto table of this device based on p_delay
+	 * 3. mark req_ready_map with this dev_bit.
+	 */
+
+	mutex_lock(&link->req.lock);
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -EBADSLT;
+		mutex_unlock(&link->req.lock);
+		goto end;
+	}
+	slot = &tbl->slot[idx];
+	if (slot->state != CRM_REQ_STATE_PENDING &&
+		slot->state != CRM_REQ_STATE_EMPTY) {
+		CRM_WARN("Unexpected state %d for slot %d map %x",
+			slot->state, idx, slot->req_ready_map);
+	}
+
+	slot->state = CRM_REQ_STATE_PENDING;
+	slot->req_ready_map |= (1 << device->dev_bit);
+
+	CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
+		slot->req_ready_map);
+
+	if (slot->req_ready_map == tbl->dev_mask) {
+		CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+			idx, add_req->req_id, tbl->pd);
+		slot->state = CRM_REQ_STATE_READY;
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
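+/*
+ * Worked example (bit positions assumed for illustration): with two
+ * devices on the same pd table holding dev_bit 0 and 1, each add_req ORs
+ * its bit into req_ready_map; only when it equals dev_mask (0x3) does the
+ * slot move to CRM_REQ_STATE_READY.
+ */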
+
+/**
+ * cam_req_mgr_process_error()
+ *
+ * @brief: This runs in workqueue thread context. Handles bubble/err recovery.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_error(void *priv, void *data)
+{
+	int                                  rc = 0, idx = -1, i;
+	struct cam_req_mgr_error_notify     *err_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CRM_ERR("input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
+	CRM_DBG("link_hdl %x req_id %lld error %d",
+		err_info->link_hdl,
+		err_info->req_id,
+		err_info->error);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
+		if (idx < 0) {
+			CRM_ERR("req_id %lld not found in input queue",
+			err_info->req_id);
+		} else {
+			CRM_DBG("req_id %lld found at idx %d",
+				err_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (!slot->recover) {
+				CRM_WARN("err recovery disabled req_id %lld",
+					err_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return 0;
+			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
+			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+				CRM_WARN("req_id %lld can not be recovered %d",
+					err_info->req_id, slot->status);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			/* Notify all devices in the link about error */
+			for (i = 0; i < link->num_devs; i++) {
+				device = &link->l_dev[i];
+				if (device != NULL) {
+					evt_data.dev_hdl = device->dev_hdl;
+					evt_data.evt_type =
+						CAM_REQ_MGR_LINK_EVT_ERR;
+					evt_data.link_hdl =  link->link_hdl;
+					evt_data.req_id = err_info->req_id;
+					evt_data.u.error = err_info->error;
+					if (device->ops &&
+						device->ops->process_evt)
+						rc = device->ops->
+							process_evt(&evt_data);
+				}
+			}
+			/* Bring processing pointer to bubbled req id */
+			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+			in_q->rd_idx = idx;
+			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+			mutex_lock(&link->lock);
+			link->state = CAM_CRM_LINK_STATE_ERR;
+			mutex_unlock(&link->lock);
+		}
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
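+/*
+ * Note on bubble recovery above: the read pointer is moved back to the
+ * bubbled request's slot, every pd table re-enters skip alignment and the
+ * link stays in CAM_CRM_LINK_STATE_ERR until that request is successfully
+ * re-applied.
+ */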
+
+/**
  * cam_req_mgr_process_sof()
  *
  * @brief: This runs in workque thread context. Call core funcs to check
- * which peding requests can be processed.
- * @data:contains information about frame_id, link etc.
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
  *
- * Returns 0 on success.
+ * @return: 0 on success.
  */
 static int cam_req_mgr_process_sof(void *priv, void *data)
 {
-	int ret = 0, i = 0;
-	struct cam_req_mgr_sof_notify *sof_data = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
-	struct cam_req_mgr_connected_device *device = NULL;
-	struct cam_req_mgr_apply_request apply_req;
+	int                                  rc = 0;
+	struct cam_req_mgr_sof_notify       *sof_data = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
 		CRM_ERR("input args NULL %pK %pK", data, priv);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
-	sof_data = (struct cam_req_mgr_sof_notify *)data;
+	task_data = (struct crm_task_payload *)data;
+	sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
 
 	CRM_DBG("link_hdl %x frame_id %lld",
 		sof_data->link_hdl,
 		sof_data->frame_id);
 
-	apply_req.link_hdl = sof_data->link_hdl;
-	/* @TODO: go through request table and issue
-	 * request id based on dev status
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	/*
+	 * Check if current read index is in applied state, if yes make it free
+	 *    and increment read index to next slot.
 	 */
-	apply_req.request_id = sof_data->frame_id;
-	apply_req.report_if_bubble = 0;
+	CRM_DBG("link_hdl %x current idx %d req_status %d",
+		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
-	CRM_DBG("link %pK l_dev %pK num_dev %d",
-		link, link->l_devices, link->num_connections);
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		if (device != NULL) {
-			CRM_DBG("dev_id %d dev_hdl %x ops %pK p_delay %d",
-				device->dev_info.dev_id, device->dev_hdl,
-				device->ops, device->dev_info.p_delay);
-			apply_req.dev_hdl = device->dev_hdl;
-			if (device->ops && device->ops->apply_req) {
-				ret = device->ops->apply_req(&apply_req);
-				/* Error handling for this failure is pending */
-				if (ret < 0)
-					CRM_ERR("Failure:%d dev=%d", ret,
-						device->dev_info.dev_id);
-			}
+	if (link->state == CAM_CRM_LINK_STATE_ERR)
+		CRM_WARN("Error recovery idx %d status %d",
+			in_q->rd_idx,
+			in_q->slot[in_q->rd_idx].status);
 
-		}
+	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
+		/*
+		 * Do NOT reset req q slot data here, it can not be done
+		 * here because we need to preserve the data to handle bubble.
+		 */
+		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
 	}
+	rc = __cam_req_mgr_process_req(link);
+	mutex_unlock(&link->req.lock);
 
 end:
-	return ret;
+	return rc;
 }
 
-/**
- * cam_req_mgr_notify_sof()
- *
- * @brief: SOF received from device, sends trigger through workqueue
- * @sof_data: contains information about frame_id, link etc.
- *
- * Returns 0 on success
- */
-static int cam_req_mgr_cb_notify_sof(struct cam_req_mgr_sof_notify *sof_data)
-{
-	int                           ret = 0;
-	struct crm_workq_task        *task = NULL;
-	struct cam_req_mgr_core_link *link = NULL;
 
-	if (!sof_data) {
+/* Linked devices' Callback section */
+
+/**
+ * cam_req_mgr_cb_add_req()
+ *
+ * @brief    : Drivers call this function to notify that a new packet is
+ *             available.
+ * @add_req  : Information about new request available at a device.
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
+{
+	int                             rc = 0, idx;
+	struct crm_workq_task          *task = NULL;
+	struct cam_req_mgr_core_link   *link = NULL;
+	struct cam_req_mgr_add_request *dev_req;
+	struct crm_task_payload        *task_data;
+
+	if (!add_req) {
 		CRM_ERR("sof_data is NULL");
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
 	}
 
-	CRM_DBG("link_hdl %x frame_id %lld",
-		sof_data->link_hdl,
-		sof_data->frame_id);
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(add_req->link_hdl);
+
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Validate if req id is present in input queue */
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task dev %x req %lld",
+			add_req->dev_hdl, add_req->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+	dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
+	dev_req->req_id = add_req->req_id;
+	dev_req->link_hdl = add_req->link_hdl;
+	dev_req->dev_hdl = add_req->dev_hdl;
+	task->process_cb = &cam_req_mgr_process_add_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_err()
+ *
+ * @brief    : Error received from device, sends bubble recovery
+ * @err_info : contains information about the error that occurred, e.g.
+ *             bubble/overflow
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_err(
+	struct cam_req_mgr_error_notify *err_info)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_error_notify *notify_err;
+	struct crm_task_payload         *task_data;
+
+	if (!err_info) {
+		CRM_ERR("err_info is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(err_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	crm_timer_reset(link->watchdog);
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CRM_ERR("no empty task req_id %lld", err_info->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
+	notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
+	notify_err->req_id = err_info->req_id;
+	notify_err->link_hdl = err_info->link_hdl;
+	notify_err->dev_hdl = err_info->dev_hdl;
+	notify_err->error = err_info->error;
+	task->process_cb = &cam_req_mgr_process_error;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_sof()
+ *
+ * @brief   : SOF received from device, sends trigger through workqueue
+ * @sof_data: contains information about frame_id, link etc.
+ *
+ * @return  : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_sof(
+	struct cam_req_mgr_sof_notify *sof_data)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_sof_notify   *notify_sof;
+	struct crm_task_payload         *task_data;
+
+	if (!sof_data) {
+		CRM_ERR("sof_data is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
 
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sof_data->link_hdl);
 	if (!link) {
 		CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
-		ret = -EINVAL;
+		rc = -EINVAL;
 		goto end;
-
 	}
 
+	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
 		CRM_ERR("no empty task frame %lld", sof_data->frame_id);
-		ret = -EBUSY;
+		rc = -EBUSY;
 		goto end;
 	}
-	task->type = CRM_WORKQ_TASK_NOTIFY_SOF;
-	task->u.notify_sof.frame_id = sof_data->frame_id;
-	task->u.notify_sof.link_hdl = sof_data->link_hdl;
-	task->u.notify_sof.dev_hdl = sof_data->dev_hdl;
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
+	notify_sof = (struct cam_req_mgr_sof_notify *)&task_data->u;
+	notify_sof->frame_id = sof_data->frame_id;
+	notify_sof->link_hdl = sof_data->link_hdl;
+	notify_sof->dev_hdl = sof_data->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_sof;
-	task->priv = link;
-	cam_req_mgr_workq_enqueue_task(task);
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
 end:
-	return ret;
+	return rc;
 }
 
+static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
+	.notify_sof = cam_req_mgr_cb_notify_sof,
+	.notify_err = cam_req_mgr_cb_notify_err,
+	.add_req    = cam_req_mgr_cb_add_req,
+};
+
 /**
- * cam_req_mgr_pvt_reserve_link()
+ * __cam_req_mgr_setup_link_info()
  *
- * @brief: Reserves one link data struct within session
- * @session: session identifier
+ * @brief     : Sets up input queue, creates pd based tables, and establishes
+ *              communication with the devs connected on this link.
+ * @link      : pointer to link to setup
+ * @link_info : link_info coming from CSL to prepare link
  *
- * Returns pointer to link reserved
+ * @return    : 0 on success, negative in case of failure
+ *
  */
-static struct cam_req_mgr_core_link *cam_req_mgr_pvt_reserve_link(
-	struct cam_req_mgr_core_session *session)
+static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_link_info *link_info)
 {
-	int32_t i;
-	struct cam_req_mgr_core_link *link;
+	int                                     rc = 0, i = 0;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_req_tbl             *pd_tbl;
+	enum cam_pipeline_delay                 max_delay;
 
-	if (!session) {
-		CRM_ERR("NULL session ptr");
-		return NULL;
-	}
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
+		return -EPERM;
 
-	spin_lock(&session->lock);
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		link = &session->links[i];
-		spin_lock(&link->lock);
-		if (link->link_state == CAM_CRM_LINK_STATE_AVAILABLE) {
-			link->num_connections = 0;
-			link->max_pipeline_delay = 0;
-			memset(link->req_table, 0,
-				sizeof(struct cam_req_mgr_request_table));
-			link->link_state = CAM_CRM_LINK_STATE_IDLE;
-			spin_unlock(&link->lock);
-			break;
+	mutex_init(&link->req.lock);
+	CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+	link->req.num_tbl = 0;
+
+	rc = __cam_req_mgr_setup_in_q(&link->req);
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&link->lock);
+	max_delay = CAM_PIPELINE_DELAY_0;
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+		/* Using dev hdl, get ops ptr to communicate with device */
+		dev->ops = (struct cam_req_mgr_kmd_ops *)
+			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (!dev->ops ||
+			!dev->ops->get_dev_info ||
+			!dev->ops->link_setup) {
+			CRM_ERR("FATAL: device ops NULL");
+			rc = -ENXIO;
+			goto error;
 		}
-		spin_unlock(&link->lock);
-	}
-	CRM_DBG("Link available (total %d)", session->num_active_links);
-	spin_unlock(&session->lock);
-
-	if (i >= MAX_LINKS_PER_SESSION)
-		link = NULL;
-
-	return link;
-}
-
-/**
- * cam_req_mgr_pvt_create_subdevs()
- *
- * @brief: Create new crm  subdev to link with realtime devices
- * @l_devices: list of subdevs internal to crm
- * @num_dev: num of subdevs to be created for link
- *
- * Returns pointer to allocated list of devices
- */
-static struct cam_req_mgr_connected_device *
-	cam_req_mgr_pvt_create_subdevs(int32_t num_dev)
-{
-	struct cam_req_mgr_connected_device *l_devices;
-
-	l_devices = (struct cam_req_mgr_connected_device *)
-		kzalloc(sizeof(struct cam_req_mgr_connected_device) * num_dev,
-		GFP_KERNEL);
-	if (!l_devices)
-		CRM_DBG("Insufficient memory %lu",
-			sizeof(struct cam_req_mgr_connected_device) * num_dev);
-
-	return l_devices;
-}
-
-/**
- * cam_req_mgr_pvt_destroy_subdev()
- *
- * @brief: Cleans up the subdevs allocated by crm for link
- * @l_device: pointer to list of subdevs crm created
- *
- * Returns 0 for success
- */
-static int cam_req_mgr_pvt_destroy_subdev(
-	struct cam_req_mgr_connected_device **l_device)
-{
-	int ret = 0;
-
-	if (!(*l_device))
-		ret = -EINVAL;
-	else {
-		kfree(*l_device);
-		*l_device = NULL;
+		dev->dev_hdl = link_info->dev_hdls[i];
+		dev->parent = (void *)link;
+		dev->dev_info.dev_hdl = dev->dev_hdl;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+		CRM_DBG("%x: connected: %s, id %d, delay %d",
+			link_info->session_hdl, dev->dev_info.name,
+			dev->dev_info.dev_id, dev->dev_info.p_delay);
+		if (rc < 0 ||
+			dev->dev_info.p_delay >=
+			CAM_PIPELINE_DELAY_MAX ||
+			dev->dev_info.p_delay <
+			CAM_PIPELINE_DELAY_0) {
+			CRM_ERR("get device info failed");
+			goto error;
+		} else {
+			CRM_DBG("%x: connected: %s, delay %d",
+				link_info->session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.p_delay);
+			if (dev->dev_info.p_delay > max_delay)
+				max_delay = dev->dev_info.p_delay;
+		}
 	}
 
-	return ret;
+
+	link_data.link_enable = 1;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = &cam_req_mgr_ops;
+	link_data.max_delay = max_delay;
+
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+
+		link_data.dev_hdl = dev->dev_hdl;
+		/*
+		 * For unique pipeline delay table create request
+		 * tracking table
+		 */
+		if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
+			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
+				dev->dev_info.p_delay);
+			if (!pd_tbl) {
+				CRM_ERR("pd %d tbl not found",
+					dev->dev_info.p_delay);
+				rc = -ENXIO;
+				goto error;
+			}
+		} else {
+			pd_tbl = __cam_req_mgr_create_pd_tbl(
+				dev->dev_info.p_delay);
+			if (pd_tbl == NULL) {
+				CRM_ERR("create new pd tbl failed");
+				rc = -ENXIO;
+				goto error;
+			}
+			pd_tbl->pd = dev->dev_info.p_delay;
+			link->pd_mask |= (1 << pd_tbl->pd);
+			/*
+			 * Add table to list and also sort list
+			 * from max pd to lowest
+			 */
+			__cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
+		}
+		dev->dev_bit = pd_tbl->dev_count++;
+		dev->pd_tbl = pd_tbl;
+		pd_tbl->dev_mask |= (1 << dev->dev_bit);
+
+		/* Communicate with dev to establish the link */
+		dev->ops->link_setup(&link_data);
+
+		if (link->max_delay < dev->dev_info.p_delay)
+			link->max_delay = dev->dev_info.p_delay;
+	}
+	link->num_devs = link_info->num_devices;
+
+	/* Assign id for pd tables */
+	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
+
+	/* At start, expect max pd devices, all are in skip state */
+	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+
+	mutex_unlock(&link->lock);
+	return 0;
+
+error:
+	__cam_req_mgr_destroy_link_info(link);
+	return rc;
 }
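+/*
+ * Worked example (device mix assumed for illustration): if two devices on
+ * the link both report p_delay 2, the second one finds pd 2 already set in
+ * pd_mask, reuses the existing pd table and only receives a new dev_bit in
+ * that table's dev_mask.
+ */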
 
+/* IOCTLs handling section */
 int cam_req_mgr_create_session(
 	struct cam_req_mgr_session_info *ses_info)
 {
-	int ret = 0;
-	int32_t i;
-	int32_t session_hdl;
-	struct cam_req_mgr_core_session *cam_session;
+	int                              rc = 0;
+	int32_t                          session_hdl;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		kzalloc(sizeof(*cam_session), GFP_KERNEL);
 	if (!cam_session) {
-		ret = -ENOMEM;
+		rc = -ENOMEM;
 		goto end;
 	}
 
 	session_hdl = cam_create_session_hdl((void *)cam_session);
 	if (session_hdl < 0) {
 		CRM_ERR("unable to create session_hdl = %x", session_hdl);
-		ret = session_hdl;
-		goto session_hdl_failed;
+		rc = session_hdl;
+		kfree(cam_session);
+		goto end;
 	}
 	ses_info->session_hdl = session_hdl;
+
+	mutex_init(&cam_session->lock);
+	CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+
+	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
-
-	spin_lock_init(&cam_session->lock);
-	cam_session->num_active_links = 0;
-
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-		spin_lock_init(&cam_session->links[i].lock);
-		cam_session->links[i].link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-		INIT_LIST_HEAD(&cam_session->links[i].link_head);
-		cam_session->links[i].workq = NULL;
-	}
+	cam_session->num_links = 0;
 	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
-
-	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
-session_hdl_failed:
-	kfree(cam_session);
+	mutex_unlock(&cam_session->lock);
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
+	return rc;
 }
 
 int cam_req_mgr_destroy_session(
 		struct cam_req_mgr_session_info *ses_info)
 {
-	int ret;
-	int32_t i;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link = NULL;
+	int rc;
+	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_ERR("NULL session info pointer");
+		CRM_DBG("NULL session info pointer");
 		return -EINVAL;
 	}
 
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(ses_info->session_hdl);
-	if (cam_session == NULL) {
+	if (!cam_session) {
 		CRM_ERR("failed to get session priv");
-		ret = -ENOENT;
+		rc = -ENOENT;
 		goto end;
 
 	}
-	spin_lock(&cam_session->lock);
-	for (i = 0; i < cam_session->num_active_links; i++) {
-		link = &cam_session->links[i];
-		CRM_ERR("session %x active_links %d hdl %x connections %d",
+	mutex_lock(&cam_session->lock);
+	if (cam_session->num_links) {
+		CRM_ERR("destroy session %x num_active_links %d",
 			ses_info->session_hdl,
-			cam_session->num_active_links,
-			link->link_hdl, link->num_connections);
+			cam_session->num_links);
+		/* @TODO : Go through active links and destroy ? */
 	}
 	list_del(&cam_session->entry);
-	spin_unlock(&cam_session->lock);
+	mutex_unlock(&cam_session->lock);
+	mutex_destroy(&cam_session->lock);
 	kfree(cam_session);
 
-	ret = cam_destroy_session_hdl(ses_info->session_hdl);
-	if (ret)
-		CRM_ERR("unable to destroy session_hdl = %x ret %d",
-			ses_info->session_hdl, ret);
+	rc = cam_destroy_session_hdl(ses_info->session_hdl);
+	if (rc < 0)
+		CRM_ERR("unable to destroy session_hdl = %x rc %d",
+			ses_info->session_hdl, rc);
 
 end:
 	mutex_unlock(&g_crm_core_dev->crm_lock);
-	return ret;
-
+	return rc;
 }
 
 int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 {
-	int ret = 0;
-	int32_t i, link_hdl;
-	char buf[128];
-	struct cam_create_dev_hdl root_dev;
-	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_core_dev_link_setup link_data;
-	struct cam_req_mgr_connected_device *l_devices;
-	enum cam_pipeline_delay max_delay = CAM_PIPELINE_DELAY_0;
+	int                                     rc = 0;
+	char                                    buf[128];
+	struct cam_create_dev_hdl               root_dev;
+	struct cam_req_mgr_core_session        *cam_session;
+	struct cam_req_mgr_core_link           *link;
 
 	if (!link_info) {
-		CRM_ERR("NULL pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
-
 	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
 		CRM_ERR("Invalid num devices %d", link_info->num_devices);
 		return -EINVAL;
 	}
 
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(link_info->session_hdl);
 	if (!cam_session) {
-		CRM_ERR("NULL session pointer");
+		CRM_DBG("NULL pointer");
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_reserve_link(cam_session);
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
 	if (!link) {
-		CRM_ERR("NULL link pointer");
+		CRM_ERR("failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	CRM_DBG("link reserved %pK %x", link, link->link_hdl);
 
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->session_hdl;
 	root_dev.priv = (void *)link;
 
-	link_hdl = cam_create_device_hdl(&root_dev);
-	if (link_hdl < 0) {
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
 		CRM_ERR("Insufficient memory to create new device handle");
-		ret = link_hdl;
+		mutex_unlock(&link->lock);
+		rc = link->link_hdl;
 		goto link_hdl_fail;
 	}
+	mutex_unlock(&link->lock);
+	link_info->link_hdl = link->link_hdl;
 
-	l_devices = cam_req_mgr_pvt_create_subdevs(link_info->num_devices);
-	if (!l_devices) {
-		ret = -ENOMEM;
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->num_devices);
+	if (rc < 0) {
+		CRM_ERR("Insufficient memory to create new crm subdevs");
 		goto create_subdev_failed;
 	}
 
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->get_dev_info) {
-				ret = l_devices[i].ops->get_dev_info(
-					&l_devices[i].dev_info);
-				if (ret < 0 ||
-					l_devices[i].dev_info.p_delay >=
-					CAM_PIPELINE_DELAY_MAX ||
-					l_devices[i].dev_info.p_delay <
-					CAM_PIPELINE_DELAY_0) {
-					CRM_ERR("get device info failed");
-					goto error;
-				} else {
-					CRM_DBG("%x: connected: %s, delay %d",
-						link_info->session_hdl,
-						l_devices[i].dev_info.name,
-						l_devices[i].dev_info.p_delay);
-					if (l_devices[i].dev_info.p_delay >
-						max_delay)
-					max_delay =
-						l_devices[i].dev_info.p_delay;
-				}
-			}
-		} else {
-			CRM_ERR("FATAL: device ops NULL");
-			ret = -ENXIO;
-			goto error;
-		}
-	}
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
 
-	link_data.link_enable = true;
-	link_data.link_hdl = link_hdl;
-	link_data.crm_cb = &cam_req_mgr_ops;
-	link_data.max_delay = max_delay;
-
-	/* After getting info about all devices, establish link */
-	for (i = 0; i < link_info->num_devices; i++) {
-		l_devices[i].dev_hdl = link_info->dev_hdls[i];
-		l_devices[i].parent = (void *)link;
-		l_devices[i].ops = (struct cam_req_mgr_kmd_ops *)
-			cam_get_device_ops(link_info->dev_hdls[i]);
-		link_data.dev_hdl = l_devices[i].dev_hdl;
-		l_devices[i].dev_info.dev_hdl = l_devices[i].dev_hdl;
-		if (l_devices[i].ops) {
-			if (l_devices[i].ops->link_setup) {
-				ret = l_devices[i].ops->link_setup(&link_data);
-				if (ret < 0) {
-					/* TODO check handlng of this failure */
-					CRM_ERR("link setup failed");
-					goto error;
-				}
-			}
-		}
-		list_add_tail(&l_devices[i].entry, &link->link_head);
-	}
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	mutex_unlock(&link->lock);
 
 	/* Create worker for current link */
-	snprintf(buf, sizeof(buf), "%x-%x", link_info->session_hdl, link_hdl);
-	ret = cam_req_mgr_workq_create(buf, &link->workq);
-	if (ret < 0) {
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->session_hdl, link->link_hdl);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
-		goto error;
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
 	}
 
-	link_info->link_hdl = link_hdl;
-	spin_lock(&link->lock);
-	link->l_devices = l_devices;
-	link->link_hdl = link_hdl;
-	link->parent = (void *)cam_session;
-	link->num_connections = link_info->num_devices;
-	link->link_state = CAM_CRM_LINK_STATE_READY;
-	spin_unlock(&link->lock);
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links++;
-	spin_unlock(&cam_session->lock);
+	/* Start watchdog timer to detect if camera hw goes into a bad state */
+	rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
+		link, &__cam_req_mgr_sof_freeze);
+	if (rc < 0) {
+		kfree(link->workq->task.pool[0].payload);
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
 
-	return ret;
-
-error:
-	cam_req_mgr_pvt_destroy_subdev(&l_devices);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
 create_subdev_failed:
-	cam_destroy_device_hdl(link_hdl);
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->link_hdl = 0;
 link_hdl_fail:
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	spin_unlock(&link->lock);
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	mutex_unlock(&link->lock);
 
-	return ret;
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
 }
 
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
 {
-	int ret = 0;
-	int32_t i = 0;
+	int                              rc = 0;
 	struct cam_req_mgr_core_session *cam_session;
-	struct cam_req_mgr_core_link *link;
-	struct cam_req_mgr_connected_device *device;
-	struct cam_req_mgr_core_dev_link_setup link_data;
+	struct cam_req_mgr_core_link    *link;
 
 	if (!unlink_info) {
 		CRM_ERR("NULL pointer");
 		return -EINVAL;
 	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+
+	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
-	cam_get_device_priv(unlink_info->session_hdl);
+		cam_get_device_priv(unlink_info->session_hdl);
 	if (!cam_session) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
 
-	link = cam_req_mgr_pvt_find_link(cam_session,
-		unlink_info->link_hdl);
+	/* link hdl's priv data is core_link struct */
+	link = cam_get_device_priv(unlink_info->link_hdl);
 	if (!link) {
 		CRM_ERR("NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+	__cam_req_mgr_print_req_tbl(&link->req);
 
-	ret = cam_destroy_device_hdl(link->link_hdl);
-	if (ret < 0) {
-		CRM_ERR("error in destroying dev handle %d %x",
-			ret, link->link_hdl);
-		ret = -EINVAL;
-	}
-	link_data.link_enable = false;
-	link_data.link_hdl = link->link_hdl;
-	link_data.crm_cb = NULL;
-	for (i = 0; i < link->num_connections; i++) {
-		device = &link->l_devices[i];
-		link_data.dev_hdl = device->dev_hdl;
-		if (device->ops && device->ops->link_setup)
-			device->ops->link_setup(&link_data);
-		device->dev_hdl = 0;
-		device->parent = NULL;
-		device->ops = NULL;
-		list_del(&device->entry);
-	}
-	/* Destroy worker of link */
-	cam_req_mgr_workq_destroy(link->workq);
-	spin_lock(&link->lock);
-	link->link_state = CAM_CRM_LINK_STATE_AVAILABLE;
-	link->parent = NULL;
-	link->num_connections = 0;
-	link->link_hdl = 0;
-	link->workq = NULL;
-	spin_unlock(&link->lock);
+	/* Destroy workq payload data */
+	kfree(link->workq->task.pool[0].payload);
+	link->workq->task.pool[0].payload = NULL;
 
-	spin_lock(&cam_session->lock);
-	cam_session->num_active_links--;
-	spin_unlock(&cam_session->lock);
+	/* Destroy workq and timer of link */
+	crm_timer_exit(&link->watchdog);
 
-	ret = cam_req_mgr_pvt_destroy_subdev(&link->l_devices);
-	if (ret < 0) {
-		CRM_ERR("error while destroying subdev link %x",
-			link_data.link_hdl);
-		ret = -EINVAL;
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Clean up request tables */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle */
+	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
+	if (rc < 0) {
+		CRM_ERR("error while destroying dev handle %d %x",
+			rc, link->link_hdl);
 	}
 
-	return ret;
+	/* Free current link and put back into session's free pool of links */
+	__cam_req_mgr_unreserve_link(cam_session, &link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+
+	return rc;
 }
 
 int cam_req_mgr_schedule_request(
 			struct cam_req_mgr_sched_request *sched_req)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_core_session  *session = NULL;
+	struct cam_req_mgr_sched_request *sched;
+	struct crm_task_payload          *task_data;
+
 	if (!sched_req) {
-		CRM_ERR("NULL pointer");
+		CRM_ERR("csl_req is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(sched_req->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+		return -EINVAL;
+	}
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	if (!session) {
+		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task)
+		return -ENOMEM;
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	sched->req_id = sched_req->req_id;
+	sched->link_hdl = sched_req->link_hdl;
+	if (session->force_err_recovery == AUTO_RECOVERY) {
+		sched->bubble_enable = sched_req->bubble_enable;
+	} else {
+		sched->bubble_enable =
+		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
+	}
+	task->process_cb = &cam_req_mgr_process_sched_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	return rc;
 }
 
-int cam_req_mgr_sync_mode(
-			struct cam_req_mgr_sync_mode *sync_links)
+int cam_req_mgr_sync_link(
+	struct cam_req_mgr_sync_mode *sync_links)
 {
 	if (!sync_links) {
 		CRM_ERR("NULL pointer");
@@ -611,15 +1983,70 @@
 }
 
 int cam_req_mgr_flush_requests(
-			struct cam_req_mgr_flush_info *flush_info)
+	struct cam_req_mgr_flush_info *flush_info)
 {
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_flush_info    *flush;
+	struct crm_task_payload          *task_data;
+	struct cam_req_mgr_core_session  *session = NULL;
+
 	if (!flush_info) {
-		CRM_ERR("NULL pointer");
-		return -EINVAL;
+		CRM_ERR("flush req is NULL");
+		rc = -EFAULT;
+		goto end;
+	}
+	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
+		CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+		rc = -EINVAL;
+		goto end;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	/* session hdl's priv data is cam session struct */
+	session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(flush_info->session_hdl);
+	if (!session) {
+		CRM_ERR("Invalid session %x", flush_info->session_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (session->num_links <= 0) {
+		CRM_WARN("No active links in session %x",
+		flush_info->session_hdl);
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(flush_info->link_hdl);
+	if (!link) {
+		CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
+	flush = (struct cam_req_mgr_flush_info *)&task_data->u;
+	flush->req_id = flush_info->req_id;
+	flush->link_hdl = flush_info->link_hdl;
+	flush->flush_type = flush_info->flush_type;
+	task->process_cb = &cam_req_mgr_process_flush_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/* Blocking call */
+	init_completion(&link->workq_comp);
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	return rc;
 }
 
 
@@ -639,6 +2066,7 @@
 	CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
 	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
 	mutex_init(&g_crm_core_dev->crm_lock);
+	cam_req_mgr_debug_register(g_crm_core_dev);
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 7679f20..889ee9c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -14,213 +14,344 @@
 
 #include "cam_req_mgr_interface.h"
 #include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_timer.h"
 
-#define CAM_REQ_MGR_MAX_LINKED_DEV 16
+#define CAM_REQ_MGR_MAX_LINKED_DEV     16
+#define MAX_REQ_SLOTS                  48
+
+#define CAM_REQ_MGR_WATCHDOG_TIMEOUT   5000
+#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT  1000
+#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
+
+#define FORCE_DISABLE_RECOVERY  2
+#define FORCE_ENABLE_RECOVERY   1
+#define AUTO_RECOVERY           0
+
+#define CRM_WORKQ_NUM_TASKS 30
 
 /**
- * enum crm_req_status
- * State machine for life cycle of request in link
- * EMPTY - indicates req slot is empty
- * PENDING - indicates req slot is waiting for reqs from all devs
- * READY - indicates req slot is ready to be sent to devs
- * APPLIED - indicates req slot is sent to devices
- * INVALID - indicates req slot is not in valid state
+ * enum crm_workq_task_type
+ * @codes: to identify which type of task is present
  */
-enum crm_req_status {
-	CRM_REQ_STATUS_EMPTY,
-	CRM_REQ_STATUS_PENDING,
-	CRM_REQ_STATUS_READY,
-	CRM_REQ_STATUS_APPLIED,
-	CRM_REQ_STATUS_INVALID,
+enum crm_workq_task_type {
+	CRM_WORKQ_TASK_GET_DEV_INFO,
+	CRM_WORKQ_TASK_SETUP_LINK,
+	CRM_WORKQ_TASK_DEV_ADD_REQ,
+	CRM_WORKQ_TASK_APPLY_REQ,
+	CRM_WORKQ_TASK_NOTIFY_SOF,
+	CRM_WORKQ_TASK_NOTIFY_ERR,
+	CRM_WORKQ_TASK_SCHED_REQ,
+	CRM_WORKQ_TASK_FLUSH_REQ,
+	CRM_WORKQ_TASK_INVALID,
+};
+
+/**
+ * struct crm_task_payload
+ * @type       : to identify which type of task is present
+ * @u          : union of payload of all types of tasks supported
+ * @sched_req  : contains info of incoming request from CSL to CRM
+ * @flush_info : contains info of cancelled request
+ * @dev_req    : contains tracking info of available req id at device
+ * @send_req   : contains info of apply settings to be sent to devs in link
+ * @apply_req  : contains info of which request is applied at device
+ * @notify_sof : contains notification from IFE to CRM about SOF trigger
+ * @notify_err : contains info of an error that happened while processing request
+ * -
+ */
+struct crm_task_payload {
+	enum crm_workq_task_type type;
+	union {
+		struct cam_req_mgr_sched_request        sched_req;
+		struct cam_req_mgr_flush_info           flush_info;
+		struct cam_req_mgr_add_request          dev_req;
+		struct cam_req_mgr_send_request         send_req;
+		struct cam_req_mgr_sof_notify           notify_sof;
+		struct cam_req_mgr_error_notify         notify_err;
+	} u;
+};
+
+/**
+ * enum crm_req_state
+ * State machine for life cycle of request in pd table
+ * EMPTY   : indicates req slot is empty
+ * PENDING : indicates req slot is waiting for reqs from all devs
+ * READY   : indicates req slot is ready to be sent to devs
+ * INVALID : indicates req slot is not in valid state
+ */
+enum crm_req_state {
+	CRM_REQ_STATE_EMPTY,
+	CRM_REQ_STATE_PENDING,
+	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_INVALID,
+};
+
+/**
+ * enum crm_slot_status
+ * State machine for life cycle of request in input queue
+ * NO_REQ     : empty slot
+ * REQ_ADDED  : new entry in slot
+ * REQ_PENDING : waiting for request to be ready on all devices
+ * APPLIED    : req is sent to devices
+ * INVALID    : invalid state
+ */
+enum crm_slot_status {
+	CRM_SLOT_STATUS_NO_REQ,
+	CRM_SLOT_STATUS_REQ_ADDED,
+	CRM_SLOT_STATUS_REQ_PENDING,
+	CRM_SLOT_STATUS_REQ_APPLIED,
+	CRM_SLOT_STATUS_INVALID,
 };
 
 /**
  * enum cam_req_mgr_link_state
  * State machine for life cycle of link in crm
- * AVAILABLE - indicates link is not in use
- * IDLE - indicates link is reserved but not initialized
- * READY - indicates link is initialized and ready for operation
- * STREAMING - indicates link is receiving triggers and requests
- * BUBBLE_DETECTED - indicates device on link is in bad shape
- * ROLLBACK_STARTED - indicates link had triggered error recovery
- * MAX - indicates link max as invalid
+ * AVAILABLE  : link available
+ * IDLE       : link initialized but not ready yet
+ * READY      : link is ready for use
+ * ERR	      : link has encountered error
+ * MAX        : invalid state
  */
 enum cam_req_mgr_link_state {
 	CAM_CRM_LINK_STATE_AVAILABLE,
 	CAM_CRM_LINK_STATE_IDLE,
 	CAM_CRM_LINK_STATE_READY,
-	CAM_CRM_LINK_STATE_STREAMING,
-	CAM_CRM_LINK_STATE_BUBBLE_DETECTED,
-	CAM_CRM_LINK_STATE_ROLLBACK_STARTED,
-	CAM_CRM_LINK_STATE_DEVICE_STATE_MAX,
+	CAM_CRM_LINK_STATE_ERR,
+	CAM_CRM_LINK_STATE_MAX,
 };
 
 /**
- * struct cam_req_mgr_request_slot
- * @idx: device handle
- * @req_status: state machine for life cycle of a request
- * @request_id: request id value
+ * struct cam_req_mgr_traverse
+ * @idx        : slot index
+ * @result     : contains which all tables were able to apply successfully
+ * @tbl        : pointer of pipeline delay based request table
+ * @apply_data : pointer which various tables will update during traverse
+ * @in_q       : input request queue pointer
  */
-struct cam_req_mgr_request_slot {
+struct cam_req_mgr_traverse {
+	int32_t                       idx;
+	uint32_t                      result;
+	struct cam_req_mgr_req_tbl   *tbl;
+	struct cam_req_mgr_apply     *apply_data;
+	struct cam_req_mgr_req_queue *in_q;
+};
+
+/**
+ * struct cam_req_mgr_apply
+ * @idx      : corresponding input queue slot index
+ * @pd       : pipeline delay of device
+ * @req_id   : req id for dev with above pd to process
+ * @skip_idx : skip applying settings when this is set
+ */
+struct cam_req_mgr_apply {
 	int32_t idx;
-	enum crm_req_status req_status;
-	int64_t request_id;
+	int32_t pd;
+	int64_t req_id;
+	int32_t skip_idx;
 };
 
 /**
- * struct cam_req_mgr_request_queue
- * @read_index: idx currently being processed
- * @write_index: idx at which incoming req is stored
- * @num_slots: num of req slots i.e. queue depth
- * @req_slot: slots which hold the request info
+ * struct cam_req_mgr_tbl_slot
+ * @idx           : slot index
+ * @req_ready_map : mask tracking which all devices have request ready
+ * @state         : state machine for life cycle of a slot
  */
-struct cam_req_mgr_request_queue {
-	int32_t read_index;
-	int32_t write_index;
-	uint32_t num_slots;
-	struct cam_req_mgr_request_slot *req_slot;
+struct cam_req_mgr_tbl_slot {
+	int32_t             idx;
+	uint32_t            req_ready_map;
+	enum crm_req_state  state;
 };
 
 /**
- * struct cam_req_mgr_frame_settings
- * @request_id: request id to apply
- * @frame_id: frame id for debug purpose
+ * struct cam_req_mgr_req_tbl
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : to indicate how many traverses need to be dropped by
+ *                  this table, especially in the beginning or bubble recovery
+ * @next          : pointer to next pipeline delay request table
+ * @pd_delta      : difference between this table's pipeline delay and next
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
  */
-struct cam_req_mgr_frame_settings {
-	int64_t request_id;
-	int64_t frame_id;
+struct cam_req_mgr_req_tbl {
+	int32_t                     id;
+	int32_t                     pd;
+	int32_t                     dev_count;
+	int32_t                     dev_mask;
+	int32_t                     skip_traverse;
+	struct cam_req_mgr_req_tbl *next;
+	int32_t                     pd_delta;
+	int32_t                     num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
 };
 
 /**
- * struct cam_req_mgr_request_table
- * @pipeline_delay: pipeline delay of this req table
- * @l_devices: list of devices belonging to this p_delay
- * @dev_mask: each dev hdl has unique bit assigned, dev mask tracks if all devs
- *  received req id packet from UMD to process
+ * struct cam_req_mgr_slot
+ * - Internal Book keeping
+ * @idx      : slot index
+ * @skip_idx : if req id in this slot needs to be skipped/not applied
+ * @status   : state machine for life cycle of a slot
+ * - members updated due to external events
+ * @recover  : if user enabled recovery for this request.
+ * @req_id   : request id of the incoming request stored in this slot
  */
-struct cam_req_mgr_request_table {
-	uint32_t pipeline_delay;
-	struct list_head l_devices;
-	uint32_t dev_mask;
+struct cam_req_mgr_slot {
+	int32_t               idx;
+	int32_t               skip_idx;
+	enum crm_slot_status  status;
+	int32_t               recover;
+	int64_t               req_id;
+};
+
+/**
+ * struct cam_req_mgr_req_queue
+ * @num_slots   : max num of input queue slots
+ * @slot        : request slot holding incoming request id and bubble info.
+ * @rd_idx      : indicates slot index currently in process.
+ * @wr_idx      : indicates slot index to hold new upcoming req.
+ */
+struct cam_req_mgr_req_queue {
+	int32_t                     num_slots;
+	struct cam_req_mgr_slot     slot[MAX_REQ_SLOTS];
+	int32_t                     rd_idx;
+	int32_t                     wr_idx;
+};
+
+/**
+ * struct cam_req_mgr_req_data
+ * @in_q        : Pointer to input request queue
+ * @l_tbl       : unique pd request tables.
+ * @num_tbl     : how many unique pd value devices are present
+ * @apply_data	: Holds information about request id for a request
+ * @lock        : mutex lock protecting request data ops.
+ */
+struct cam_req_mgr_req_data {
+	struct cam_req_mgr_req_queue *in_q;
+	struct cam_req_mgr_req_tbl   *l_tbl;
+	int32_t                       num_tbl;
+	struct cam_req_mgr_apply      apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct mutex                  lock;
 };
 
 /**
  * struct cam_req_mgr_connected_device
- *- Device Properties
- * @dev_hdl: device handle
- * @dev_bit: unique bit assigned to device in link
- * -Device progress status
- * @available_req_id: tracks latest available req id at this device
- * @processing_req_id: tracks request id currently processed
+ * - Device Properties
+ * @dev_hdl  : device handle
+ * @dev_bit  : unique bit assigned to device in link
  * - Device characteristics
- * @dev_info: holds dev characteristics such as pipeline delay, dev name
- * @ops: holds func pointer to call methods on this device
- * @parent: pvt data - Pointer to parent link device its connected with
- * @entry: entry to the list of connected devices in link
+ * @pd_tbl   : request table matching this device's pipeline delay
+ * @dev_info : holds dev characteristics such as pipeline delay, dev name
+ * @ops      : holds func pointer to call methods on this device
+ * @parent   : pvt data - like link which this dev hdl belongs to
  */
 struct cam_req_mgr_connected_device {
-	int32_t dev_hdl;
-	int64_t dev_bit;
-	int64_t available_req_id;
-	int64_t processing_req_id;
-	struct cam_req_mgr_device_info dev_info;
-	struct cam_req_mgr_kmd_ops *ops;
-	void *parent;
-	struct list_head entry;
+	int32_t                         dev_hdl;
+	int64_t                         dev_bit;
+	struct cam_req_mgr_req_tbl     *pd_tbl;
+	struct cam_req_mgr_device_info  dev_info;
+	struct cam_req_mgr_kmd_ops     *ops;
+	void                           *parent;
 };
 
 /**
  * struct cam_req_mgr_core_link
- * - Link Properties
- * @link_hdl: Link identifier
- * @num_connections: num of connected devices to this link
- * @max_pipeline_delay: Max of pipeline delay of all connected devs
- * - Input request queue
- * @in_requests: Queue to hold incoming request hints from CSL
- * @workq: Pointer to handle workq related jobs
+ * -  Link Properties
+ * @link_hdl       : Link identifier
+ * @num_devs       : num of connected devices to this link
+ * @max_delay      : Max of pipeline delay of all connected devs
+ * @workq          : Pointer to handle workq related jobs
+ * @pd_mask        : each set bit indicates the device with pd equal to bit
+ *                   position is available.
  * - List of connected devices
- * @l_devices: List of connected devices to this link
- * @fs_list: Holds the request id which each device in link will consume.
- * @req_table: table to keep track of req ids recived at each dev handle
+ * @l_dev          : List of connected devices to this link
+ * - Request handling data struct
+ * @req            : req data holder.
+ * - Timer
+ * @watchdog       : watchdog timer to recover from sof freeze
  * - Link private data
- * @link_state: link state cycle
- * @parent: pvt data - like session info
- * @link_head: List head of connected devices
- * @lock: spin lock to guard link data operations
+ * @workq_comp     : completion used to block the user thread until workq
+ *                   finishes schedule request processing
+ * @state          : link state machine
+ * @parent         : pvt data - link's parent is session
+ * @lock           : mutex lock to guard link data operations
  */
 struct cam_req_mgr_core_link {
-	int32_t link_hdl;
-	int32_t num_connections;
-	enum cam_pipeline_delay max_pipeline_delay;
-	struct cam_req_mgr_request_queue in_requests;
-	struct cam_req_mgr_core_workq *workq;
-	struct cam_req_mgr_connected_device *l_devices;
-	struct cam_req_mgr_frame_settings fs_list[CAM_REQ_MGR_MAX_LINKED_DEV];
-	struct cam_req_mgr_request_table req_table[CAM_PIPELINE_DELAY_MAX];
-	enum cam_req_mgr_link_state link_state;
-	void *parent;
-	struct list_head link_head;
-	spinlock_t lock;
+	int32_t                              link_hdl;
+	int32_t                              num_devs;
+	enum cam_pipeline_delay              max_delay;
+	struct cam_req_mgr_core_workq       *workq;
+	int32_t                              pd_mask;
+	struct cam_req_mgr_connected_device *l_dev;
+	struct cam_req_mgr_req_data          req;
+	struct cam_req_mgr_timer            *watchdog;
+	struct completion                    workq_comp;
+	enum cam_req_mgr_link_state          state;
+	void                                *parent;
+	struct mutex                         lock;
 };
 
 /**
  * struct cam_req_mgr_core_session
  * - Session Properties
- * @session_hdl: session identifier
- * @num_active_links: num of active links for current session
+ * @session_hdl        : session identifier
+ * @num_links          : num of active links for current session
  * - Links of this session
- * @links: pointer to array of links within session
+ * @links              : pointer to array of links within session
+ * @in_q               : Input request queue one per session
  * - Session private data
- * @entry: pvt data - entry in the list of sessions
- * @lock: pvt data - spin lock to guard session data
+ * @entry              : pvt data - entry in the list of sessions
+ * @lock               : pvt data - mutex lock to guard session data
+ * - Debug data
+ * @force_err_recovery : For debugging, we can force bubble recovery
+ *                       to be always ON or always OFF using debugfs.
  */
 struct cam_req_mgr_core_session {
-	int32_t session_hdl;
-	uint32_t num_active_links;
-	struct cam_req_mgr_core_link links[MAX_LINKS_PER_SESSION];
-	struct list_head entry;
-	spinlock_t lock;
+	int32_t                       session_hdl;
+	uint32_t                      num_links;
+	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
+	struct cam_req_mgr_req_queue  in_q;
+	struct list_head              entry;
+	struct mutex                  lock;
+	int32_t                       force_err_recovery;
 };
 
 /**
  * struct cam_req_mgr_core_device
  * - Core camera request manager data struct
- * @session_head: list head holding sessions
- * @crm_lock: mutex lock to protect session creation & destruction
+ * @session_head : list head holding sessions
+ * @crm_lock     : mutex lock to protect session creation & destruction
  */
 struct cam_req_mgr_core_device {
-	struct list_head session_head;
-	struct mutex crm_lock;
+	struct list_head             session_head;
+	struct mutex                 crm_lock;
 };
 
-/* cam_req_mgr_dev to cam_req_mgr_core internal functions */
 /**
  * cam_req_mgr_create_session()
- * @brief: creates session
- * @ses_info: output param for session handle
+ * @brief    : creates session
+ * @ses_info : output param for session handle
  *
- * Called as part of session creation.
+ * Called as part of session creation.
  */
-int cam_req_mgr_create_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_destroy_session()
- * @brief: destroy session
- * @ses_info: session handle info, input param
+ * @brief    : destroy session
+ * @ses_info : session handle info, input param
  *
  * Called as part of session destroy
  * return success/failure
  */
-int cam_req_mgr_destroy_session(
-	struct cam_req_mgr_session_info *ses_info);
+int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
 
 /**
  * cam_req_mgr_link()
- * @brief: creates a link for a session
- * @link_info: handle and session info to create a link
+ * @brief     : creates a link for a session
+ * @link_info : handle and session info to create a link
  *
- * Link is formed in a session for multiple devices. It creates
+ * Link is formed in a session for multiple devices. It creates
  * a unique link handle for the link and is specific to a
  * session. Returns link handle
  */
@@ -228,10 +359,10 @@
 
 /**
  * cam_req_mgr_unlink()
- * @brief: destroy a link in a session
- * @unlink_info: session and link handle info
+ * @brief       : destroy a link in a session
+ * @unlink_info : session and link handle info
  *
- * Link is destroyed in a session
+ * Link is destroyed in a session
  */
 int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info);
 
@@ -244,11 +375,11 @@
 	struct cam_req_mgr_sched_request *sched_req);
 
 /**
- * cam_req_mgr_sync_mode()
+ * cam_req_mgr_sync_link()
  * @brief: sync for links in a session
  * @sync_links: session, links info and master link info
  */
-int cam_req_mgr_sync_mode(struct cam_req_mgr_sync_mode *sync_links);
+int cam_req_mgr_sync_link(struct cam_req_mgr_sync_mode *sync_links);
 
 /**
  * cam_req_mgr_flush_requests()
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index cf2fe7f..2a831e8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -15,6 +15,16 @@
 #define CRM_TRACE_ENABLE 0
 #define CRM_DEBUG_MUTEX 0
 
+#define SET_SUCCESS_BIT(ret, pd)	{\
+	(ret) |= (1 << (pd));	\
+	}
+
+#define SET_FAILURE_BIT(ret, pd)	{\
+	(ret) &= ~(1 << (pd));	\
+	}
+
+#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
+
 #if (CRM_TRACE_ENABLE == 1)
 	#define CRM_DBG(fmt, args...) do { \
 	trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
new file mode 100644
index 0000000..19833d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_debug.h"
+
+#define MAX_SESS_INFO_LINE_BUFF_LEN 256
+
+static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+
+static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
+{
+	struct cam_req_mgr_core_device  *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+	int rc = 0;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			session->force_err_recovery = val;
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return rc;
+}
+
+static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val)
+{
+	struct cam_req_mgr_core_device *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		session = list_first_entry(&core_dev->session_head,
+			struct cam_req_mgr_core_session,
+			entry);
+		*val = session->force_err_recovery;
+	}
+	mutex_unlock(&core_dev->crm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
+	cam_req_mgr_debug_set_bubble_recovery, "%lld\n");
+
+static int session_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t session_info_read(struct file *t_file, char *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = sess_info_buffer;
+	char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
+	struct cam_req_mgr_core_device *core_dev =
+		(struct cam_req_mgr_core_device *) t_file->private_data;
+	struct cam_req_mgr_core_session *session;
+
+	memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			snprintf(line_buffer, sizeof(line_buffer),
+				"session_hdl = %x \t"
+				"num_links = %d\n",
+				session->session_hdl, session->num_links);
+			strlcat(out_buffer, line_buffer,
+				sizeof(sess_info_buffer));
+			for (i = 0; i < session->num_links; i++) {
+				snprintf(line_buffer, sizeof(line_buffer),
+					"link_hdl[%d] = 0x%x, num_devs connected = %d\n",
+					i, session->links[i]->link_hdl,
+					session->links[i]->num_devs);
+				strlcat(out_buffer, line_buffer,
+					sizeof(sess_info_buffer));
+			}
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t session_info_write(struct file *t_file,
+	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	return 0;
+}
+
+static const struct file_operations session_info = {
+	.open = session_info_open,
+	.read = session_info_read,
+	.write = session_info_write,
+};
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
+{
+	struct dentry *debugfs_root;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "cam_req_mgr");
+	debugfs_root = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("sessions_info", 0644,
+		debugfs_root, core_dev, &session_info))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bubble_recovery", 0644,
+		debugfs_root, core_dev, &bubble_recovery))
+		return -ENOMEM;
+
+	return 0;
+}
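
A minimal userspace sketch of how the two debugfs nodes created above are intended to be exercised; it assumes debugfs is mounted at /sys/kernel/debug and is illustrative only, not part of the patch:

/*
 * Force bubble recovery ON for all sessions (1 == FORCE_ENABLE_RECOVERY,
 * 2 == FORCE_DISABLE_RECOVERY, 0 == AUTO_RECOVERY) and dump sessions_info.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t len;
	int fd;

	fd = open("/sys/kernel/debug/cam_req_mgr/bubble_recovery", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/debug/cam_req_mgr/sessions_info", O_RDONLY);
	if (fd < 0)
		return 1;
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}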
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
new file mode 100644
index 0000000..82ac764
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_DEBUG_H_
+#define _CAM_REQ_MGR_DEBUG_H_
+
+#include <linux/debugfs.h>
+#include "cam_req_mgr_core.h"
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index f3af1bd..13affe9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
 #include "cam_subdev.h"
+#include "cam_mem_mgr.h"
 
 #define CAM_REQ_MGR_EVENT_MAX 30
 
@@ -115,7 +116,18 @@
 	spin_unlock_bh(&g_dev.cam_eventq_lock);
 
 	g_dev.open_cnt++;
+	rc = cam_mem_mgr_init();
+	if (rc) {
+		g_dev.open_cnt--;
+		pr_err("mem mgr init failed\n");
+		goto mem_mgr_init_fail;
+	}
 
+	mutex_unlock(&g_dev.cam_lock);
+	return rc;
+
+mem_mgr_init_fail:
+	v4l2_fh_release(filep);
 end:
 	mutex_unlock(&g_dev.cam_lock);
 	return rc;
@@ -154,6 +166,7 @@
 	spin_unlock_bh(&g_dev.cam_eventq_lock);
 
 	cam_req_mgr_util_free_hdls();
+	cam_mem_mgr_deinit();
 	mutex_unlock(&g_dev.cam_lock);
 
 	return 0;
@@ -313,7 +326,85 @@
 			return -EFAULT;
 		}
 
-		rc = cam_req_mgr_sync_mode(&sync_mode);
+		rc = cam_req_mgr_sync_link(&sync_mode);
+		}
+		break;
+	case CAM_REQ_MGR_ALLOC_BUF: {
+		struct cam_mem_mgr_alloc_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_alloc_and_map(&cmd);
+		if (!rc)
+			if (copy_to_user((void *)k_ioctl->handle,
+				&cmd, k_ioctl->size)) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_MAP_BUF: {
+		struct cam_mem_mgr_map_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_map(&cmd);
+		if (!rc)
+			if (copy_to_user((void *)k_ioctl->handle,
+				&cmd, k_ioctl->size)) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_RELEASE_BUF: {
+		struct cam_mem_mgr_release_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_release(&cmd);
+		}
+		break;
+	case CAM_REQ_MGR_CACHE_OPS: {
+		struct cam_mem_cache_ops_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_cache_ops(&cmd);
+		if (rc)
+			rc = -EINVAL;
 		}
 		break;
 	default:
@@ -444,6 +535,7 @@
 static int cam_req_mgr_remove(struct platform_device *pdev)
 {
 	cam_req_mgr_core_device_deinit();
+	cam_mem_mgr_deinit();
 	cam_req_mgr_util_deinit();
 	cam_media_device_cleanup();
 	cam_video_device_cleanup();
@@ -482,6 +574,12 @@
 		goto req_mgr_util_fail;
 	}
 
+	rc = cam_mem_mgr_init();
+	if (rc) {
+		pr_err("mem mgr init failed\n");
+		goto mem_mgr_init_fail;
+	}
+
 	rc = cam_req_mgr_core_device_init();
 	if (rc) {
 		pr_err("core device setup failed\n");
@@ -493,8 +591,12 @@
 	return rc;
 
 req_mgr_core_fail:
+	cam_mem_mgr_deinit();
+mem_mgr_init_fail:
 	cam_req_mgr_util_deinit();
 req_mgr_util_fail:
+	mutex_destroy(&g_dev.dev_lock);
+	mutex_destroy(&g_dev.cam_lock);
 	cam_video_device_cleanup();
 video_setup_fail:
 	cam_media_device_cleanup();
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 174a725..91860f6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -18,15 +18,14 @@
 #include "cam_req_mgr_core_defs.h"
 #include "cam_req_mgr_util.h"
 
-/* Forward declarations */
 struct cam_req_mgr_sof_notify;
 struct cam_req_mgr_error_notify;
 struct cam_req_mgr_add_request;
 struct cam_req_mgr_device_info;
 struct cam_req_mgr_core_dev_link_setup;
 struct cam_req_mgr_apply_request;
-
-/*Ops table for req mgr - kmd communication */
+struct cam_req_mgr_flush_request;
+struct cam_req_mgr_link_evt_data;
 
 /* Request Manager -- camera device driver interface */
 /**
@@ -44,21 +43,25 @@
  * @brief: cam req mgr to camera device drivers
  *
  * @cam_req_mgr_get_dev_info: to fetch details about device linked
- * @cam_req_mgr_link_setup: to establish link with device for a session
- * @cam_req_mgr_notify_err: to broadcast error happened on link for request id
- * @cam_req_mgr_apply_req: CRM asks device to apply certain request id.
+ * @cam_req_mgr_link_setup  : to establish link with device for a session
+ * @cam_req_mgr_notify_err  : to broadcast error happened on link for request id
+ * @cam_req_mgr_apply_req   : CRM asks device to apply certain request id.
+ * @cam_req_mgr_flush_req   : Flush or cancel request
+ * @cam_req_mgr_process_evt : process generic events
  */
 typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
 typedef int (*cam_req_mgr_link_setup)(
 	struct cam_req_mgr_core_dev_link_setup *);
 typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *);
+typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
+typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
 
 /**
- * @brief: cam_req_mgr_crm_cb - func table
+ * @brief      : cam_req_mgr_crm_cb - func table
  *
- * @notify_sof: payload for sof indication event
- * @notify_err: payload for different error occurred at device
- * @add_req: pauload to inform which device and what request is received
+ * @notify_sof : payload for sof indication event
+ * @notify_err : payload for different error occurred at device
+ * @add_req    : payload to inform which device and what request is received
  */
 struct cam_req_mgr_crm_cb {
 	cam_req_mgr_notify_sof  notify_sof;
@@ -67,26 +70,30 @@
 };
 
 /**
- * @brief: cam_req_mgr_kmd_ops - func table
+ * @brief        : cam_req_mgr_kmd_ops - func table
  *
- * @get_dev_info: payload to fetch device details
- * @link_setup: payload to establish link with device
- * @apply_req: payload to apply request id on a device linked
+ * @get_dev_info : payload to fetch device details
+ * @link_setup   : payload to establish link with device
+ * @apply_req    : payload to apply request id on a device linked
+ * @flush_req    : payload to flush request
+ * @process_evt  : payload to process a generic event
  */
 struct cam_req_mgr_kmd_ops {
 	cam_req_mgr_get_dev_info      get_dev_info;
 	cam_req_mgr_link_setup        link_setup;
 	cam_req_mgr_apply_req         apply_req;
+	cam_req_mgr_flush_req         flush_req;
+	cam_req_mgr_process_evt       process_evt;
 };
 
 /**
  * enum cam_pipeline_delay
- * @brief: enumerator for different pipeline delays in camera
+ * @brief     : enumerator for different pipeline delays in camera
  *
- * @DELAY_0: device processed settings on same frame
- * @DELAY_1: device processed settings after 1 frame
- * @DELAY_2: device processed settings after 2 frames
- * @DELAY_MAX: maximum supported pipeline delay
+ * @DELAY_0   : device processed settings on same frame
+ * @DELAY_1   : device processed settings after 1 frame
+ * @DELAY_2   : device processed settings after 2 frames
+ * @DELAY_MAX : maximum supported pipeline delay
  */
 enum cam_pipeline_delay {
 	CAM_PIPELINE_DELAY_0,
@@ -97,11 +104,11 @@
 
 /**
  * enum cam_req_status
- * @brief: enumerator for request status
+ * @brief   : enumerator for request status
  *
- * @SUCCESS: device processed settings successfully
- * @FAILED: device processed settings failed
- * @MAX: invalid status value
+ * @SUCCESS : device processed settings successfully
+ * @FAILED  : device processed settings failed
+ * @MAX     : invalid status value
  */
 enum cam_req_status {
 	CAM_REQ_STATUS_SUCCESS,
@@ -111,15 +118,15 @@
 
 /**
  * enum cam_req_mgr_device_error
- * @brief: enumerator for different errors occurred at device
+ * @brief      : enumerator for different errors occurred at device
  *
- * @NOT_FOUND: settings asked by request manager is not found
- * @BUBBLE: device hit timing issue and is able to recover
- * @FATAL: device is in bad shape and can not recover from error
- * @PAGE_FAULT: Page fault while accessing memory
- * @OVERFLOW: Bus Overflow for IFE/VFE
- * @TIMEOUT: Timeout from cci or bus.
- * @MAX: Invalid error value
+ * @NOT_FOUND  : settings asked by request manager is not found
+ * @BUBBLE     : device hit timing issue and is able to recover
+ * @FATAL      : device is in bad shape and can not recover from error
+ * @PAGE_FAULT : Page fault while accessing memory
+ * @OVERFLOW   : Bus Overflow for IFE/VFE
+ * @TIMEOUT    : Timeout from cci or bus.
+ * @MAX        : Invalid error value
  */
 enum cam_req_mgr_device_error {
 	CRM_KMD_ERR_NOT_FOUND,
@@ -133,17 +140,17 @@
 
 /**
  * enum cam_req_mgr_device_id
- * @brief: enumerator for different devices in subsystem
+ * @brief       : enumerator for different devices in subsystem
  *
- * @CAM_REQ_MGR: request manager itself
- * @SENSOR: sensor device
- * @FLASH: LED flash or dual LED device
- * @ACTUATOR: lens mover
- * @IFE: Image processing device
- * @EXTERNAL_1: third party device
- * @EXTERNAL_2: third party device
- * @EXTERNAL_3: third party device
- * @MAX: invalid device id
+ * @CAM_REQ_MGR : request manager itself
+ * @SENSOR      : sensor device
+ * @FLASH       : LED flash or dual LED device
+ * @ACTUATOR    : lens mover
+ * @IFE         : Image processing device
+ * @EXTERNAL_1  : third party device
+ * @EXTERNAL_2  : third party device
+ * @EXTERNAL_3  : third party device
+ * @MAX         : invalid device id
  */
 enum cam_req_mgr_device_id {
 	CAM_REQ_MGR_DEVICE,
@@ -158,11 +165,22 @@
 };
 
 /* Camera device driver to Req Mgr device interface */
+
+/**
+ * enum cam_req_mgr_link_evt_type
+ * @CAM_REQ_MGR_LINK_EVT_ERR : error event reported by a device on the link
+ * @CAM_REQ_MGR_LINK_EVT_MAX : invalid/max link event type
+ */
+enum cam_req_mgr_link_evt_type {
+	CAM_REQ_MGR_LINK_EVT_ERR,
+	CAM_REQ_MGR_LINK_EVT_MAX,
+};
+
 /**
  * struct cam_req_mgr_sof_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @frame_id: frame id for internal tracking
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @frame_id : frame id for internal tracking
  */
 struct cam_req_mgr_sof_notify {
 	int32_t link_hdl;
@@ -172,11 +190,10 @@
 
 /**
  * struct cam_req_mgr_error_notify
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which hit error
- * @error: what error device hit while processing this req
- *
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which hit error
+ * @error    : what error device hit while processing this req
  */
 struct cam_req_mgr_error_notify {
 	int32_t link_hdl;
@@ -187,9 +204,9 @@
 
 /**
  * struct cam_req_mgr_add_request
- * @link_hdl: link identifier
- * @dev_hdl: device handle which has sent this req id
- * @req_id: req id which device is ready to process
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which device is ready to process
  *
  */
 struct cam_req_mgr_add_request {
@@ -202,48 +219,91 @@
 /* CRM to KMD devices */
 /**
  * struct cam_req_mgr_device_info
- * @dev_hdl: Input_param : device handle for reference
- * @name: link link or unlink
- * @dev_id: device id info
- * @p_delay: delay between time settings applied and take effect
+ * @dev_hdl : Input_param : device handle for reference
+ * @name    : device name
+ * @dev_id  : device id info
+ * @p_delay : delay between time settings applied and take effect
  *
  */
 struct cam_req_mgr_device_info {
-	int32_t dev_hdl;
-	char name[256];
-	enum cam_req_mgr_device_id dev_id;
-	enum cam_pipeline_delay p_delay;
+	int32_t                     dev_hdl;
+	char                        name[256];
+	enum cam_req_mgr_device_id  dev_id;
+	enum cam_pipeline_delay     p_delay;
 };
 
 /**
  * struct cam_req_mgr_core_dev_link_setup
- * @link_enable: link link or unlink
- * @link_hdl: link identifier
- * @dev_hdl: device handle for reference
- * @max_delay: max pipeline delay on this link
- * @crm_cb: callback funcs to communicate with req mgr
+ * @link_enable : set to link, cleared to unlink
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for reference
+ * @max_delay   : max pipeline delay on this link
+ * @crm_cb      : callback funcs to communicate with req mgr
  *
  */
 struct cam_req_mgr_core_dev_link_setup {
-	bool link_enable;
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	enum cam_pipeline_delay max_delay;
+	int32_t                    link_enable;
+	int32_t                    link_hdl;
+	int32_t                    dev_hdl;
+	enum cam_pipeline_delay    max_delay;
 	struct cam_req_mgr_crm_cb *crm_cb;
 };
 
 /**
  * struct cam_req_mgr_apply_request
- * @link_id: link identifier
- * @dev_hdl: device handle for cross check
- * @request_id: request id settings to apply
- * @report_if_bubble: report to crm if failure in applying
+ * @link_hdl         : link identifier
+ * @dev_hdl          : device handle for cross check
+ * @request_id       : request id settings to apply
+ * @report_if_bubble : report to crm if failure in applying
  *
  */
 struct cam_req_mgr_apply_request {
+	int32_t    link_hdl;
+	int32_t    dev_hdl;
+	int64_t    request_id;
+	int32_t    report_if_bubble;
+};
+
+/**
+ * struct cam_req_mgr_flush_request
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for cross check
+ * @type        : cancel request type: flush all or a specific request
+ * @req_id      : request id to cancel
+ *
+ */
+struct cam_req_mgr_flush_request {
+	int32_t     link_hdl;
+	int32_t     dev_hdl;
+	uint32_t    type;
+	int64_t     req_id;
+};
+
+/**
+ * struct cam_req_mgr_link_evt_data
+ * @link_hdl : link handle
+ * @dev_hdl  : device handle
+ * @req_id   : request id
+ * @evt_type : type of link event
+ * @u        : union holding event specific data, such as device error
+ *
+ */
+struct cam_req_mgr_link_evt_data {
 	int32_t link_hdl;
 	int32_t dev_hdl;
-	int64_t request_id;
-	int32_t report_if_bubble;
+	int64_t req_id;
+
+	enum cam_req_mgr_link_evt_type evt_type;
+	union {
+		enum cam_req_mgr_device_error error;
+	} u;
+};
+
+/**
+ * struct cam_req_mgr_send_request
+ * @link_hdl   : link identifier
+ * @in_q       : input request queue pointer
+ *
+ */
+struct cam_req_mgr_send_request {
+	int32_t    link_hdl;
+	struct cam_req_mgr_req_queue *in_q;
 };
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
new file mode 100644
index 0000000..9da445d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_timer.h"
+
+void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
+{
+	if (!crm_timer)
+		return;
+	CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+		crm_timer->expires, jiffies);
+	mod_timer(&crm_timer->sys_timer,
+		(jiffies + msecs_to_jiffies(crm_timer->expires)));
+}
+
+void crm_timer_callback(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+	if (!timer) {
+		CRM_ERR("NULL timer");
+		return;
+	}
+	CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+	crm_timer_reset(timer);
+}
+
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires)
+{
+	CRM_DBG("new time %d", expires);
+	if (crm_timer) {
+		crm_timer->expires = expires;
+		crm_timer_reset(crm_timer);
+	}
+}
+
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long))
+{
+	int                       ret = 0;
+	struct cam_req_mgr_timer *crm_timer = NULL;
+
+	CRM_DBG("init timer %d %pK", expires, *timer);
+	if (*timer == NULL) {
+		crm_timer = (struct cam_req_mgr_timer *)
+			kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
+		if (!crm_timer) {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		if (timer_cb != NULL)
+			crm_timer->timer_cb = timer_cb;
+		else
+			crm_timer->timer_cb = crm_timer_callback;
+
+		crm_timer->expires = expires;
+		crm_timer->parent = parent;
+		setup_timer(&crm_timer->sys_timer,
+			crm_timer->timer_cb, (unsigned long)crm_timer);
+		crm_timer_reset(crm_timer);
+		*timer = crm_timer;
+	} else {
+		CRM_WARN("Timer already exists!!");
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
+{
+	CRM_DBG("destroy timer %pK", *crm_timer);
+	if (*crm_timer) {
+		del_timer(&(*crm_timer)->sys_timer);
+		kfree(*crm_timer);
+		*crm_timer = NULL;
+	}
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
new file mode 100644
index 0000000..4d600ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_TIMER_H_
+#define _CAM_REQ_MGR_TIMER_H_
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core_defs.h"
+
+/** struct cam_req_mgr_timer
+ * @expires   : timeout value for timer
+ * @sys_timer : system timer variable
+ * @parent    : priv data - link pointer
+ * @timer_cb  : callback func which will be called when timeout expires
+ */
+struct cam_req_mgr_timer {
+	int32_t             expires;
+	struct timer_list   sys_timer;
+	void               *parent;
+	void              (*timer_cb)(unsigned long data);
+};
+
+/**
+ * crm_timer_modify()
+ * @brief : allows user to modify the expiry time.
+ * @timer : timer which will be reset to the new expires value
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires);
+
+/**
+ * crm_timer_reset()
+ * @brief : re-arms the timer to fire after its expires interval.
+ * @timer : timer which will be reset to the expires value
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *timer);
+
+/**
+ * crm_timer_init()
+ * @brief    : create a new general purpose timer.
+ *             timer utility takes care of allocating and freeing the memory
+ * @timer    : double pointer to new timer allocated
+ * @expires  : Timeout value to fire callback
+ * @parent   : void pointer which caller can use for book keeping
+ * @timer_cb : caller can choose to use its own callback function when
+ *             the timer fires. If no value is set, the timer util
+ *             will use the default callback.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long));
+
+/**
+ * crm_timer_exit()
+ * @brief : destroys the timer allocated.
+ * @timer : timer pointer which will be freed
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **timer);
+#endif
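
As a usage sketch of the timer utility declared above, modelled on the link watchdog handling added to cam_req_mgr_core.c; it assumes cam_req_mgr_core.h is in scope, and the example_-prefixed names and callback body are illustrative, not part of the patch:

/*
 * Illustrative caller of the crm_timer API, mirroring the link watchdog
 * usage in cam_req_mgr_core.c. Names prefixed with example_ are sketches.
 */
static void example_sof_freeze_cb(unsigned long data)
{
	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
	struct cam_req_mgr_core_link *link = timer->parent;

	CRM_ERR("SOF freeze suspected on link %x", link->link_hdl);
	/* re-arm so the watchdog keeps monitoring the link */
	crm_timer_reset(timer);
}

static int example_arm_watchdog(struct cam_req_mgr_core_link *link)
{
	/* allocates the timer, arms it and stores it in link->watchdog */
	return crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
		link, &example_sof_freeze_cb);
}

static void example_disarm_watchdog(struct cam_req_mgr_core_link *link)
{
	/* deletes the pending timer, frees it and NULLs link->watchdog */
	crm_timer_exit(&link->watchdog);
}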
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 1f6a97a..f53e41c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,7 +12,7 @@
 
 #include "cam_req_mgr_workq.h"
 
-/* workqueue's task manager methods */
+
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
@@ -21,7 +21,7 @@
 	if (!workq)
 		return NULL;
 
-	spin_lock(&workq->task.lock);
+	spin_lock_bh(&workq->lock_bh);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -31,9 +31,9 @@
 		atomic_sub(1, &workq->task.free_cnt);
 		list_del_init(&task->entry);
 	}
-end:
-	spin_unlock(&workq->task.lock);
 
+end:
+	spin_unlock_bh(&workq->lock_bh);
 	return task;
 }
 
@@ -42,17 +42,20 @@
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
 
+	spin_lock_bh(&workq->lock_bh);
+	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
 	task->priv = NULL;
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
+	spin_unlock_bh(&workq->lock_bh);
 }
 
 /**
  * cam_req_mgr_process_task() - Process the enqueued task
- * @task: pointer to task worker thread shall process
+ * @task: pointer to task workq thread shall process
  */
 static int cam_req_mgr_process_task(struct crm_workq_task *task)
 {
@@ -62,31 +65,10 @@
 		return -EINVAL;
 
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
-
-	switch (task->type) {
-	case CRM_WORKQ_TASK_SCHED_REQ:
-	case CRM_WORKQ_TASK_DEV_ADD_REQ:
-	case CRM_WORKQ_TASK_NOTIFY_SOF:
-	case CRM_WORKQ_TASK_NOTIFY_ACK:
-	case CRM_WORKQ_TASK_NOTIFY_ERR:
-		if (task->process_cb)
-			task->process_cb(task->priv, &task->u);
-		else
-			CRM_WARN("FATAL:no task handler registered for workq!");
-		break;
-	case CRM_WORKQ_TASK_GET_DEV_INFO:
-	case CRM_WORKQ_TASK_SETUP_LINK:
-	case CRM_WORKQ_TASK_APPLY_REQ:
-		/* These tasks are not expected to be queued to
-		 * workque at the present
-		 */
-		CRM_DBG("Not supported");
-		break;
-	case CRM_WORKQ_TASK_INVALID:
-	default:
-		CRM_ERR("Invalid task type %x", task->type);
-		break;
-	}
+	if (task->process_cb)
+		task->process_cb(task->priv, task->payload);
+	else
+		CRM_WARN("FATAL:no task handler registered for workq");
 	cam_req_mgr_workq_put_task(task);
 
 	return 0;
@@ -99,8 +81,8 @@
 static void cam_req_mgr_process_workq(struct work_struct *w)
 {
 	struct cam_req_mgr_core_workq *workq = NULL;
-	struct crm_workq_task *task, *task_save;
-
+	struct crm_workq_task         *task, *task_save;
+	int32_t                        i = CRM_TASK_PRIORITY_0;
 	if (!w) {
 		CRM_ERR("NULL task pointer can not schedule");
 		return;
@@ -108,19 +90,44 @@
 	workq = (struct cam_req_mgr_core_workq *)
 		container_of(w, struct cam_req_mgr_core_workq, work);
 
-	list_for_each_entry_safe(task, task_save,
-		&workq->task.process_head, entry) {
-		atomic_sub(1, &workq->task.pending_cnt);
-		spin_lock(&workq->task.lock);
-		list_del_init(&task->entry);
-		spin_unlock(&workq->task.lock);
-		cam_req_mgr_process_task(task);
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				atomic_sub(1, &workq->task.pending_cnt);
+				cam_req_mgr_process_task(task);
+			}
+			CRM_DBG("processed task %pK free_cnt %d",
+				task, atomic_read(&workq->task.free_cnt));
+		}
+		i++;
 	}
-	CRM_DBG("processed task %p free_cnt %d",
-		task, atomic_read(&workq->task.free_cnt));
 }
 
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task)
+void crm_workq_clear_q(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                 i = CRM_TASK_PRIORITY_0;
+	struct crm_workq_task  *task, *task_save;
+
+	CRM_DBG("pending_cnt %d",
+		atomic_read(&workq->task.pending_cnt));
+
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		if (!list_empty(&workq->task.process_head[i])) {
+			list_for_each_entry_safe(task, task_save,
+				&workq->task.process_head[i], entry) {
+				cam_req_mgr_workq_put_task(task);
+				CRM_WARN("flush task %pK, %d, cnt %d",
+					task, i, atomic_read(
+					&workq->task.free_cnt));
+			}
+		}
+		i++;
+	}
+}
+
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio)
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
@@ -132,31 +139,33 @@
 	}
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
 	if (!workq) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
+		CRM_DBG("NULL workq pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 	if (!workq->job) {
-		CRM_WARN("NULL worker pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
 
+	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock(&workq->task.lock);
+		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
-	spin_lock(&workq->task.lock);
+	task->priv = priv;
+	task->priority =
+		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
+		? prio : CRM_TASK_PRIORITY_0;
 	list_add_tail(&task->entry,
-		&workq->task.process_head);
-	spin_unlock(&workq->task.lock);
+		&workq->task.process_head[task->priority]);
 	atomic_add(1, &workq->task.pending_cnt);
-	CRM_DBG("enq task %p pending_cnt %d",
+	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-
+	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
@@ -164,7 +173,8 @@
 	return rc;
 }
 
-int cam_req_mgr_workq_create(char *name, struct cam_req_mgr_core_workq **workq)
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -189,20 +199,35 @@
 
 		/* Workq attributes initialization */
 		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
+		spin_lock_init(&crm_workq->lock_bh);
+		CRM_DBG("LOCK_DBG workq %s lock %pK",
+			name, &crm_workq->lock_bh);
 
 		/* Task attributes initialization */
-		spin_lock_init(&crm_workq->task.lock);
 		atomic_set(&crm_workq->task.pending_cnt, 0);
 		atomic_set(&crm_workq->task.free_cnt, 0);
-		INIT_LIST_HEAD(&crm_workq->task.process_head);
+		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
+			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
-		memset(crm_workq->task.pool, 0,
-			sizeof(struct crm_workq_task) *
-			CRM_WORKQ_NUM_TASKS);
-		for (i = 0; i < CRM_WORKQ_NUM_TASKS; i++) {
+		crm_workq->task.num_task = num_tasks;
+		crm_workq->task.pool = (struct crm_workq_task *)
+			kzalloc(sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task,
+				GFP_KERNEL);
+		if (!crm_workq->task.pool) {
+			CRM_WARN("Insufficient memory %lu",
+				sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task);
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < crm_workq->task.num_task; i++) {
 			task = &crm_workq->task.pool[i];
 			task->parent = (void *)crm_workq;
 			/* Put all tasks in free pool */
+			list_add_tail(&task->entry,
+			&crm_workq->task.process_head[CRM_TASK_PRIORITY_0]);
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
@@ -213,15 +238,16 @@
 	return 0;
 }
 
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *crm_workq)
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
-	CRM_DBG("destroy workque %p", crm_workq);
-	if (crm_workq) {
-		if (crm_workq->job) {
-			destroy_workqueue(crm_workq->job);
-			crm_workq->job = NULL;
+	CRM_DBG("destroy workque %pK", crm_workq);
+	if (*crm_workq) {
+		crm_workq_clear_q(*crm_workq);
+		if ((*crm_workq)->job) {
+			destroy_workqueue((*crm_workq)->job);
+			(*crm_workq)->job = NULL;
 		}
-		kfree(crm_workq);
-		crm_workq = NULL;
+		kfree(*crm_workq);
+		*crm_workq = NULL;
 	}
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 6b36abc..7d8ca59 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _CAM_WORKER_H_
-#define _CAM_WORKER_H_
+#ifndef _CAM_REQ_MGR_WORKQ_H_
+#define _CAM_REQ_MGR_WORKQ_H_
 
 #include<linux/kernel.h>
 #include<linux/module.h>
@@ -23,99 +23,78 @@
 
 #include "cam_req_mgr_core.h"
 
-/* Macros */
-#define CRM_WORKQ_NUM_TASKS 30
-
-/**enum crm_workq_task_type
- * @codes: to identify which type of task is present
- */
-enum crm_workq_task_type {
-	CRM_WORKQ_TASK_GET_DEV_INFO,
-	CRM_WORKQ_TASK_SETUP_LINK,
-	CRM_WORKQ_TASK_SCHED_REQ,
-	CRM_WORKQ_TASK_DEV_ADD_REQ,
-	CRM_WORKQ_TASK_APPLY_REQ,
-	CRM_WORKQ_TASK_NOTIFY_SOF,
-	CRM_WORKQ_TASK_NOTIFY_ACK,
-	CRM_WORKQ_TASK_NOTIFY_ERR,
-	CRM_WORKQ_TASK_INVALID,
+/* Task priorities: the lower the number, the higher the priority */
+enum crm_task_priority {
+	CRM_TASK_PRIORITY_0 = 0,
+	CRM_TASK_PRIORITY_1 = 1,
+	CRM_TASK_PRIORITY_MAX = 2,
 };
 
 /** struct crm_workq_task
- * @type: type of task
- * u -
- * @csl_req: contains info of  incoming reqest from CSL to CRM
- * @dev_req: contains tracking info of available req id at device
- * @apply_req: contains info of which request is applied at device
- * @notify_sof: contains notification from IFE to CRM about SOF trigger
- * @notify_err: contains error inf happened while processing request
- * @dev_info: contains info about which device is connected with CRM
- * @link_setup: contains info about new link being setup
- * -
- * @process_cb: registered callback called by workq when task enqueued is ready
- *  for processing in workq thread context
- * @parent: workq's parent is link which is enqqueing taks to this workq
- * @entry: list head of this list entry is worker's empty_head
- * @cancel: if caller has got free task from pool but wants to abort or put
- *  back without using it
- * @priv: when task is enqueuer caller can attach cookie
+ * @priority   : caller can assign a priority to the task based on its type
+ * @payload    : payload type varies depending on the user of the task
+ * @process_cb : registered callback invoked by the workq when an enqueued
+ *               task is ready for processing in workq thread context
+ * @parent     : workq's parent is the link which is enqueueing tasks to
+ *               this workq
+ * @entry      : list entry; its list head is the workq's empty_head
+ * @cancel     : set if the caller got a free task from the pool but wants
+ *               to abort or put it back without using it
+ * @priv       : private data the caller can attach when enqueueing a task;
+ *               it is passed back in the process callback
+ * @ret        : return value, reserved for future use by blocking calls
  */
 struct crm_workq_task {
-	enum crm_workq_task_type type;
-	union {
-		struct cam_req_mgr_sched_request csl_req;
-		struct cam_req_mgr_add_request dev_req;
-		struct cam_req_mgr_apply_request apply_req;
-		struct cam_req_mgr_sof_notify notify_sof;
-		struct cam_req_mgr_error_notify notify_err;
-		struct cam_req_mgr_device_info dev_info;
-		struct cam_req_mgr_core_dev_link_setup link_setup;
-	} u;
-	int (*process_cb)(void *, void *);
-	void *parent;
-	struct list_head entry;
-	uint8_t cancel;
-	void *priv;
+	int32_t                  priority;
+	void                    *payload;
+	int32_t                (*process_cb)(void *, void *);
+	void                    *parent;
+	struct list_head         entry;
+	uint8_t                  cancel;
+	void                    *priv;
+	int32_t                  ret;
 };
 
-/** struct crm_core_worker
- * @work: work token used by workqueue
- * @job: workqueue internal job struct
- *task -
- * @lock: lock for task structs
- * @pending_cnt:  num of tasks pending to be processed
- * @free_cnt:  num of free/available tasks
- * @process_head: list  head of tasks pending process
- * @empty_head: list  head of available tasks which can be used
- * or acquired in order to enqueue a task to workq
- * @pool: pool  of tasks used for handling events in workq context
- *@num_task : size of tasks pool
+/** struct cam_req_mgr_core_workq
+ * @work        : work token used by workqueue
+ * @job         : workqueue internal job struct
+ * task -
+ * @lock        : lock for task structs
+ * @pending_cnt : num of tasks pending to be processed
+ * @free_cnt    : num of free/available tasks
+ * @process_head: list heads of tasks pending process, one per priority
+ * @empty_head  : list head of available tasks which can be used
+ *                or acquired in order to enqueue a task to workq
+ * @pool        : pool of tasks used for handling events in workq context
+ * @num_task    : size of tasks pool
+ * -
  */
 struct cam_req_mgr_core_workq {
-	struct work_struct work;
-	struct workqueue_struct *job;
+	struct work_struct         work;
+	struct workqueue_struct   *job;
+	spinlock_t                 lock_bh;
 
+	/* tasks */
 	struct {
-		spinlock_t lock;
-		atomic_t pending_cnt;
-		atomic_t free_cnt;
+		struct mutex           lock;
+		atomic_t               pending_cnt;
+		atomic_t               free_cnt;
 
-		struct list_head process_head;
-		struct list_head empty_head;
-		struct crm_workq_task pool[CRM_WORKQ_NUM_TASKS];
+		struct list_head       process_head[CRM_TASK_PRIORITY_MAX];
+		struct list_head       empty_head;
+		struct crm_workq_task *pool;
+		uint32_t               num_task;
 	} task;
 };
 
 /**
  * cam_req_mgr_workq_create()
- * @brief: create a workqueue
- * @name: Name of the workque to be allocated,
- * it is combination of session handle and link handle
- * @workq: Double pointer worker
+ * @brief    : create a workqueue
+ * @name     : Name of the workqueue to be allocated; it is a combination
+ *             of session handle and link handle
+ * @num_task : Number of tasks to be allocated for the workq
+ * @workq    : Double pointer to the workq
  * This function will allocate and create workqueue and pass
- * the worker pointer to caller.
+ * the workq pointer to the caller.
  */
-int cam_req_mgr_workq_create(char *name,
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
 	struct cam_req_mgr_core_workq **workq);
 
 /**
@@ -125,15 +104,18 @@
  * this function will destroy workqueue and clean up resources
  * associated with worker such as tasks.
  */
-void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq *workq);
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
 
 /**
  * cam_req_mgr_workq_enqueue_task()
  * @brief: Enqueue task in worker queue
- * @task: task to be processed by worker
+ * @task : task to be processed by worker
+ * @priv : client's private data
+ * @prio : task priority
  * process callback func
  */
-int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task);
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio);
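A minimal caller-side sketch of the priority-aware enqueue API declared above (illustrative only): cam_req_mgr_workq_get_task() is assumed to hand back a free task from the pool, and my_process_cb/my_payload/my_priv are hypothetical caller-side names.

/*
 * Illustrative sketch, not part of this patch. Out-of-range priorities
 * passed to cam_req_mgr_workq_enqueue_task() fall back to
 * CRM_TASK_PRIORITY_0, as implemented in cam_req_mgr_workq.c.
 */
static int32_t my_process_cb(void *priv, void *data)
{
	/* runs in workq thread context; priv was attached at enqueue time */
	return 0;
}

static int my_workq_example(void *my_payload, void *my_priv)
{
	struct cam_req_mgr_core_workq *workq = NULL;
	struct crm_workq_task *task;
	int rc;

	rc = cam_req_mgr_workq_create("crm_example", 20, &workq);
	if (rc)
		return rc;

	/* assumed accessor returning a free task from the pool */
	task = cam_req_mgr_workq_get_task(workq);
	if (!task) {
		cam_req_mgr_workq_destroy(&workq);
		return -EBUSY;
	}

	task->payload = my_payload;
	task->process_cb = my_process_cb;

	rc = cam_req_mgr_workq_enqueue_task(task, my_priv,
		CRM_TASK_PRIORITY_1);

	/* destroy flushes any still-queued tasks via crm_workq_clear_q() */
	cam_req_mgr_workq_destroy(&workq);
	return rc;
}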
 
 /**
  * cam_req_mgr_workq_get_task()
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
new file mode 100644
index 0000000..3619da7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
new file mode 100644
index 0000000..236e7f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -0,0 +1,2273 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include <linux/genalloc.h>
+
+#include "cam_smmu_api.h"
+
+#define SHARED_MEM_POOL_GRANULARITY 12
+
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 2
+
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
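A minimal sketch of how the two handle macros above compose, using a hypothetical index and cookie value:

/*
 * Illustrative only. With BYTE_SIZE = 8 and COOKIE_NUM_BYTE = 2,
 * COOKIE_SIZE is 16 bits and COOKIE_MASK is 0xFFFF, so the context-bank
 * index occupies the upper bits and a 16-bit random cookie the lower bits.
 */
static void cam_smmu_handle_pack_example(void)
{
	int idx = 1;				/* hypothetical table index */
	int cookie = 0x1234;			/* hypothetical random cookie */
	int hdl = GET_SMMU_HDL(idx, cookie);	/* 0x11234 */

	WARN_ON(GET_SMMU_TABLE_IDX(hdl) != idx);
}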
+
+#ifdef CONFIG_CAM_SMMU_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+struct firmware_alloc_info {
+	struct device *fw_dev;
+	void *fw_kva;
+	dma_addr_t fw_dma_hdl;
+};
+
+struct firmware_alloc_info icp_fw;
+
+struct cam_smmu_work_payload {
+	int idx;
+	struct iommu_domain *domain;
+	struct device *dev;
+	unsigned long iova;
+	int flags;
+	void *token;
+	struct list_head list;
+};
+
+enum cam_protection_type {
+	CAM_PROT_INVALID,
+	CAM_NON_SECURE,
+	CAM_SECURE,
+	CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+	CAM_SMMU_INVALID,
+	CAM_QSMMU,
+	CAM_ARM_SMMU,
+	CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+	CAM_SMMU_BUFF_EXIST,
+	CAM_SMMU_BUFF_NOT_EXIST
+};
+
+enum cam_smmu_init_dir {
+	CAM_SMMU_TABLE_INIT,
+	CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+	void *bitmap;
+	size_t bits;
+	unsigned int order;
+	dma_addr_t base;
+};
+
+struct cam_smmu_region_info {
+	dma_addr_t iova_start;
+	size_t iova_len;
+};
+
+struct cam_context_bank_info {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t va_start;
+	size_t va_len;
+	const char *name;
+	bool is_secure;
+	uint8_t scratch_buf_support;
+	uint8_t firmware_support;
+	uint8_t shared_support;
+	uint8_t io_support;
+	bool is_fw_allocated;
+
+	struct scratch_mapping scratch_map;
+	struct gen_pool *shared_mem_pool;
+
+	struct cam_smmu_region_info scratch_info;
+	struct cam_smmu_region_info firmware_info;
+	struct cam_smmu_region_info shared_info;
+	struct cam_smmu_region_info io_info;
+
+	struct list_head smmu_buf_list;
+	struct mutex lock;
+	int handle;
+	enum cam_smmu_ops_param state;
+
+	void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
+		struct device *, unsigned long,
+		int, void*);
+	void *token[CAM_SMMU_CB_MAX];
+	int cb_count;
+};
+
+struct cam_iommu_cb_set {
+	struct cam_context_bank_info *cb_info;
+	u32 cb_num;
+	u32 cb_init_count;
+	struct work_struct smmu_work;
+	struct mutex payload_list_lock;
+	struct list_head payload_list;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+	{ .compatible = "qcom,msm-cam-smmu", },
+	{ .compatible = "qcom,msm-cam-smmu-cb", },
+	{ .compatible = "qcom,msm-cam-smmu-fw-dev", },
+	{}
+};
+
+struct cam_dma_buff_info {
+	struct dma_buf *buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+	enum dma_data_direction dir;
+	enum cam_smmu_region_id region_id;
+	int iommu_dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+	size_t phys_len;
+};
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+	dma_addr_t base, size_t size,
+	int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr);
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx);
+
+static void cam_smmu_clean_buffer_list(int idx);
+
+static void cam_smmu_print_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+	int j;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	if (list_empty(&iommu_cb_set.payload_list)) {
+		pr_err("Payload list empty\n");
+		mutex_unlock(&iommu_cb_set.payload_list_lock);
+		return;
+	}
+
+	payload = list_first_entry(&iommu_cb_set.payload_list,
+		struct cam_smmu_work_payload,
+		list);
+	list_del(&payload->list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	/* Dereference the payload to call the handler */
+	idx = payload->idx;
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+	for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+		if ((iommu_cb_set.cb_info[idx].handler[j])) {
+			iommu_cb_set.cb_info[idx].handler[j](
+				payload->domain,
+				payload->dev,
+				payload->iova,
+				payload->flags,
+				iommu_cb_set.cb_info[idx].token[j]);
+		}
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	kfree(payload);
+}
+
+static void cam_smmu_print_list(int idx)
+{
+	struct cam_dma_buff_info *mapping;
+
+	pr_err("index = %d\n", idx);
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+			 mapping->ion_fd, (void *)mapping->paddr,
+			 (unsigned int)mapping->len,
+			 mapping->region_id);
+	}
+}
+
+static void cam_smmu_print_table(void)
+{
+	int i;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+			   (int)iommu_cb_set.cb_info[i].handle,
+			   (void *)iommu_cb_set.cb_info[i].name);
+		pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+	}
+}
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+{
+	struct cam_dma_buff_info *mapping;
+	unsigned long start_addr, end_addr, current_addr;
+
+	current_addr = (unsigned long)vaddr;
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		start_addr = (unsigned long)mapping->paddr;
+		end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+		if (start_addr <= current_addr && current_addr < end_addr) {
+			pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+				vaddr, (void *)start_addr, (void *)end_addr,
+				mapping->ion_fd,
+				iommu_cb_set.cb_info[idx].name);
+			goto end;
+		} else {
+			CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+				vaddr, (void *)start_addr, (void *)end_addr,
+				mapping->ion_fd);
+		}
+	}
+	pr_err("Cannot find vaddr:%pK in SMMU; %s uses an invalid virt address\n",
+		vaddr, iommu_cb_set.cb_info[idx].name);
+end:
+	return;
+}
+
+void cam_smmu_reg_client_page_fault_handler(int handle,
+	void (*client_page_fault_handler)(struct iommu_domain *,
+	struct device *, unsigned long,
+	int, void*), void *token)
+{
+	int idx, i = 0;
+
+	if (!token || (handle == HANDLE_INIT)) {
+		pr_err("Error: token is NULL or invalid handle\n");
+		return;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return;
+	}
+
+	if (client_page_fault_handler) {
+		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+			pr_err("%s Should not register more handlers\n",
+				iommu_cb_set.cb_info[idx].name);
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return;
+		}
+		iommu_cb_set.cb_info[idx].cb_count++;
+		for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+				iommu_cb_set.cb_info[idx].token[i] = token;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					client_page_fault_handler;
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == token) {
+				iommu_cb_set.cb_info[idx].token[i] = NULL;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					NULL;
+				iommu_cb_set.cb_info[idx].cb_count--;
+				break;
+			}
+		}
+		if (i == CAM_SMMU_CB_MAX)
+			pr_err("Error: hdl %x no matching tokens: %s\n",
+				handle, iommu_cb_set.cb_info[idx].name);
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
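A short usage sketch for the handler registration above (hypothetical handler and token names; hdl must be a valid handle obtained via cam_smmu_get_handle()):

/*
 * Illustrative only: registering adds (handler, token) to a free slot,
 * up to CAM_SMMU_CB_MAX per context bank; passing a NULL handler removes
 * the entry whose token matches.
 */
static void my_fault_handler(struct iommu_domain *domain, struct device *dev,
	unsigned long iova, int flags, void *token)
{
	/* invoked from the page-fault worker for faults on this CB */
}

static void cam_smmu_fault_cb_example(int hdl, void *my_token)
{
	cam_smmu_reg_client_page_fault_handler(hdl, my_fault_handler,
		my_token);

	/* ... later, unregister using the same token ... */
	cam_smmu_reg_client_page_fault_handler(hdl, NULL, my_token);
}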
+
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova,
+	int flags, void *token)
+{
+	char *cb_name;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+
+	if (!token) {
+		pr_err("Error: token is NULL\n");
+		pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
+		pr_err("iova = %lX, flags = %d\n", iova, flags);
+		return 0;
+	}
+
+	cb_name = (char *)token;
+	/* Check whether it is in the table */
+	for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+		if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+			break;
+	}
+
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: index is not valid, index = %d, token = %s\n",
+			idx, cb_name);
+		return 0;
+	}
+
+	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+	if (!payload)
+		return 0;
+
+	payload->domain = domain;
+	payload->dev = dev;
+	payload->iova = iova;
+	payload->flags = flags;
+	payload->token = token;
+	payload->idx = idx;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	schedule_work(&iommu_cb_set.smmu_work);
+
+	return 0;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return IOMMU_READ;
+	case CAM_SMMU_MAP_WRITE:
+		return IOMMU_WRITE;
+	case CAM_SMMU_MAP_RW:
+		return IOMMU_READ|IOMMU_WRITE;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		pr_err("Error: Direction is invalid. dir = %d\n", dir);
+		break;
+	};
+	return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return DMA_FROM_DEVICE;
+	case CAM_SMMU_MAP_WRITE:
+		return DMA_TO_DEVICE;
+	case CAM_SMMU_MAP_RW:
+		return DMA_BIDIRECTIONAL;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+		break;
+	}
+	return DMA_NONE;
+}
+
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+	unsigned int i;
+	int j = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+		iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+		iommu_cb_set.cb_info[i].dev = NULL;
+		iommu_cb_set.cb_info[i].cb_count = 0;
+		for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+			iommu_cb_set.cb_info[i].token[j] = NULL;
+			iommu_cb_set.cb_info[i].handler[j] = NULL;
+		}
+		if (ops == CAM_SMMU_TABLE_INIT)
+			mutex_init(&iommu_cb_set.cb_info[i].lock);
+		else
+			mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+	}
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+	int i;
+
+	if (hdl == HANDLE_INIT) {
+		CDBG("iommu handle is init number. Need to try again\n");
+		return 1;
+	}
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+			continue;
+
+		if (iommu_cb_set.cb_info[i].handle == hdl) {
+			CDBG("iommu handle %d conflicts\n", (int)hdl);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/**
+ *  use low 2 bytes for handle cookie
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+	int rand, hdl = 0;
+
+	get_random_bytes(&rand, COOKIE_NUM_BYTE);
+	hdl = GET_SMMU_HDL(idx, rand);
+	CDBG("create handle value = %x\n", (int)hdl);
+	return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+	int rc;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* attach the mapping to device */
+	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (rc < 0) {
+		pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl)
+{
+	int i;
+	int handle;
+
+	/* create handle and add in the iommu hardware table */
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+			mutex_lock(&iommu_cb_set.cb_info[i].lock);
+			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+				pr_err("Error: %s already got handle 0x%x\n",
+					name,
+					iommu_cb_set.cb_info[i].handle);
+				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+				return -EINVAL;
+			}
+
+			/* make sure handle is unique */
+			do {
+				handle = cam_smmu_create_iommu_handle(i);
+			} while (cam_smmu_check_handle_unique(handle));
+
+			/* put handle in the table */
+			iommu_cb_set.cb_info[i].handle = handle;
+			iommu_cb_set.cb_info[i].cb_count = 0;
+			*hdl = handle;
+			CDBG("%s creates handle 0x%x\n", name, handle);
+			mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+			return 0;
+		}
+	}
+
+	pr_err("Error: Cannot find name %s or all handles exist!\n",
+			name);
+	cam_smmu_print_table();
+	return -EINVAL;
+}
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+					dma_addr_t base, size_t size,
+					int order)
+{
+	unsigned int count = size >> (PAGE_SHIFT + order);
+	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	int err = 0;
+
+	if (!count) {
+		err = -EINVAL;
+		pr_err("Page count is zero, size passed = %zu\n", size);
+		goto bail;
+	}
+
+	scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!scratch_map->bitmap) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	scratch_map->base = base;
+	scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+	scratch_map->order = order;
+
+bail:
+	return err;
+}
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+
+	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+		 (1 << mapping->order) - 1) >> mapping->order;
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be allocated
+	 */
+	count++;
+
+	if (order > mapping->order)
+		align = (1 << (order - mapping->order)) - 1;
+
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+
+	if (start > mapping->bits)
+		return -ENOMEM;
+
+	bitmap_set(mapping->bitmap, start, count);
+	*iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+
+	return 0;
+}
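A worked example of the slot arithmetic used above, assuming 4 KiB pages and a scratch map created with order 0 (as cam_smmu_setup_cb() does later in this file); the size here is illustrative.

/*
 * Illustrative only: a 0x5000-byte (20 KiB) request needs 5 page slots,
 * plus the one transparent guard page added above, so 6 consecutive zero
 * bits are reserved in the bitmap and the returned iova is
 * base + (start << PAGE_SHIFT).
 */
static void cam_smmu_scratch_count_example(void)
{
	size_t size = 0x5000;
	unsigned int order = 0;			/* mapping->order */
	unsigned int count;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << order) - 1) >> order;	/* 5 pages */
	count++;				/* +1 guard page */

	WARN_ON(count != 6);
}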
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >>
+			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+
+	if (!addr) {
+		pr_err("Error: Invalid address\n");
+		return -EINVAL;
+	}
+
+	if (start + count > mapping->bits) {
+		pr_err("Error: Invalid page bits in scratch map\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be freed
+	 */
+	count++;
+	bitmap_clear(mapping->bitmap, start, count);
+
+	return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->paddr == virt_addr) {
+			CDBG("Found virtual address %lx\n",
+				 (unsigned long)virt_addr);
+			return mapping;
+		}
+	}
+
+	pr_err("Error: Cannot find virtual address %lx by index %d\n",
+		(unsigned long)virt_addr, idx);
+	return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			CDBG(" find ion_fd %d\n", ion_fd);
+			return mapping;
+		}
+	}
+
+	pr_err("Error: Cannot find fd %d by index %d\n",
+		ion_fd, idx);
+	return NULL;
+}
+
+static void cam_smmu_clean_buffer_list(int idx)
+{
+	int ret;
+	struct cam_dma_buff_info *mapping_info, *temp;
+
+	list_for_each_entry_safe(mapping_info, temp,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+			(void *)mapping_info->paddr, idx,
+			mapping_info->ion_fd);
+
+		if (mapping_info->ion_fd == 0xDEADBEEF)
+			/* Clean up scratch buffers */
+			ret = cam_smmu_free_scratch_buffer_remove_from_list(
+							mapping_info, idx);
+		else
+			/* Clean up regular mapped buffers */
+			ret = cam_smmu_unmap_buf_and_remove_from_list(
+					mapping_info,
+					idx);
+
+		if (ret < 0) {
+			pr_err("Buffer delete failed: idx = %d\n", idx);
+			pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
+					(unsigned long)mapping_info->paddr,
+					mapping_info->ion_fd);
+			/*
+			 * Ignore this error and continue to delete other
+			 * buffers in the list
+			 */
+			continue;
+		}
+	}
+}
+
+static int cam_smmu_attach(int idx)
+{
+	int ret;
+
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		ret = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		ret = cam_smmu_attach_device(idx);
+		if (ret < 0) {
+			pr_err("Error: ATTACH fail\n");
+			return -ENODEV;
+		}
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+		ret = 0;
+	} else {
+		pr_err("Error: Not detach/attach: %d\n",
+			iommu_cb_set.cb_info[idx].state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cam_smmu_detach_device(int idx)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* detach the mapping to device if not already detached */
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		rc = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		arm_iommu_detach_device(cb->dev);
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
+	}
+
+	return rc;
+}
+
+static int cam_smmu_alloc_iova(size_t size,
+	int32_t smmu_hdl, uint32_t *iova)
+{
+	int rc = 0;
+	int idx;
+	uint32_t vaddr = 0;
+
+	if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].shared_support) {
+		pr_err("Error: Shared memory not supported for hdl = %X\n",
+			smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	vaddr = gen_pool_alloc(iommu_cb_set.cb_info[idx].shared_mem_pool, size);
+	if (!vaddr)
+		return -ENOMEM;
+
+	*iova = vaddr;
+
+get_addr_end:
+	return rc;
+}
+
+static int cam_smmu_free_iova(uint32_t addr, size_t size,
+	int32_t smmu_hdl)
+{
+	int rc = 0;
+	int idx;
+
+	if (!size || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool, addr, size);
+
+get_addr_end:
+	return rc;
+}
+
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uint64_t *cpuva,
+	size_t *len)
+{
+	int rc;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+
+	if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		pr_err("Firmware memory not supported for this SMMU handle\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		pr_err("Trying to allocate twice\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	CDBG("Firmware area len from DT = %zu\n", firmware_len);
+
+	icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
+		firmware_len,
+		&icp_fw.fw_dma_hdl,
+		GFP_KERNEL);
+	if (!icp_fw.fw_kva) {
+		pr_err("FW memory alloc failed\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	} else {
+		CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+			icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
+	}
+
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	rc = iommu_map(domain,
+		firmware_start,
+		icp_fw.fw_dma_hdl,
+		firmware_len,
+		IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+
+	if (rc) {
+		pr_err("Failed to map FW into IOMMU\n");
+		rc = -ENOMEM;
+		goto alloc_fail;
+	}
+	iommu_cb_set.cb_info[idx].is_fw_allocated = true;
+
+	*iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	*cpuva = (uint64_t)icp_fw.fw_kva;
+	*len = firmware_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+alloc_fail:
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_firmware);
+
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		pr_err("Firmware memory not supported for this SMMU handle\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		pr_err("Trying to deallocate firmware that is not allocated\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain,
+		firmware_start,
+		firmware_len);
+
+	if (unmapped != firmware_len) {
+		pr_err("Only %zu unmapped out of total %zu\n",
+			unmapped,
+			firmware_len);
+		rc = -EINVAL;
+	}
+
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+
+	icp_fw.fw_kva = 0;
+	icp_fw.fw_dma_hdl = 0;
+
+	iommu_cb_set.cb_info[idx].is_fw_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	 size_t *len_ptr,
+	 enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+	struct dma_buf *buf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct iommu_domain *domain;
+	size_t size = 0;
+	uint32_t iova = 0;
+
+	/* allocate memory for each buffer information */
+	buf = dma_buf_get(ion_fd);
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = PTR_ERR(buf);
+		pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+		goto err_out;
+	}
+
+	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		rc = PTR_ERR(attach);
+		pr_err("Error: dma buf attach failed\n");
+		goto err_put;
+	}
+
+	table = dma_buf_map_attachment(attach, dma_dir);
+	if (IS_ERR_OR_NULL(table)) {
+		rc = PTR_ERR(table);
+		pr_err("Error: dma buf map attachment failed\n");
+		goto err_detach;
+	}
+
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+		if (!domain) {
+			pr_err("CB has no domain set\n");
+			goto err_unmap_sg;
+		}
+
+		rc = cam_smmu_alloc_iova(*len_ptr,
+			iommu_cb_set.cb_info[idx].handle,
+			&iova);
+
+		if (rc < 0) {
+			pr_err("IOVA alloc failed for shared memory\n");
+			goto err_unmap_sg;
+		}
+
+		size = iommu_map_sg(domain,
+			iova,
+			table->sgl,
+			table->nents,
+			IOMMU_READ | IOMMU_WRITE);
+
+		if (size < 0) {
+			pr_err("IOMMU mapping failed\n");
+			rc = cam_smmu_free_iova(iova,
+				size,
+				iommu_cb_set.cb_info[idx].handle);
+
+			if (rc)
+				pr_err("IOVA free failed\n");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			CDBG("iommu_map_sg returned %zu\n", size);
+			*paddr_ptr = iova;
+			*len_ptr = size;
+		}
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
+			table->sgl, table->nents, dma_dir, buf);
+
+		if (rc != table->nents) {
+			pr_err("Error: msm_dma_map_sg_lazy failed\n");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			*paddr_ptr = sg_dma_address(table->sgl);
+			*len_ptr = (size_t)sg_dma_len(table->sgl);
+		}
+	} else {
+		pr_err("Error: Wrong region id passed for %s\n", __func__);
+		rc = -EINVAL;
+		goto err_unmap_sg;
+	}
+
+	if (table->sgl) {
+		CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
+				(void *)buf,
+				(void *)iommu_cb_set.cb_info[idx].dev,
+				(void *)attach, (void *)table);
+		CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
+				(void *)table->sgl, rc,
+				(unsigned int)table->sgl->dma_address);
+	} else {
+		rc = -EINVAL;
+		pr_err("Error: table sgl is null\n");
+		goto err_unmap_sg;
+	}
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->buf = buf;
+	mapping_info->attach = attach;
+	mapping_info->table = table;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->region_id = region_id;
+
+	if (!*paddr_ptr || !*len_ptr) {
+		pr_err("Error: Space Allocation failed!\n");
+		kfree(mapping_info);
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+	CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
+		(void *)iommu_cb_set.cb_info[idx].dev,
+		(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+	return 0;
+
+err_alloc:
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		cam_smmu_free_iova(iova,
+			size,
+			iommu_cb_set.cb_info[idx].handle);
+
+		iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+			*paddr_ptr,
+			*len_ptr);
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			table->sgl,
+			table->nents,
+			dma_dir,
+			buf);
+	}
+err_unmap_sg:
+	dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+	dma_buf_detach(buf, attach);
+err_put:
+	dma_buf_put(buf);
+err_out:
+	return rc;
+}
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc;
+	size_t size;
+	struct iommu_domain *domain;
+
+	if ((!mapping_info->buf) || (!mapping_info->table) ||
+		(!mapping_info->attach)) {
+		pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		pr_err("Error:dma_buf = %pK, attach = %pK\n",
+			(void *)mapping_info->buf,
+			(void *)mapping_info->attach);
+		return -EINVAL;
+	}
+
+	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
+		CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+			(void *)mapping_info->paddr, mapping_info->len);
+
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+		size = iommu_unmap(domain,
+			mapping_info->paddr,
+			mapping_info->len);
+
+		if (size != mapping_info->len) {
+			pr_err("IOMMU unmap failed\n");
+			pr_err("Unmapped = %zu, requested = %zu\n",
+				size,
+				mapping_info->len);
+		}
+
+		rc = cam_smmu_free_iova(mapping_info->paddr,
+			mapping_info->len,
+			iommu_cb_set.cb_info[idx].handle);
+
+		if (rc)
+			pr_err("IOVA free failed\n");
+
+	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			mapping_info->table->sgl, mapping_info->table->nents,
+			mapping_info->dir, mapping_info->buf);
+	}
+
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, mapping_info->dir);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+	dma_buf_put(mapping_info->buf);
+
+	mapping_info->buf = NULL;
+
+	list_del_init(&mapping_info->list);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+	int ret = 0;
+
+	if (!identifier) {
+		pr_err("Error: iommu hardware name is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!handle_ptr) {
+		pr_err("Error: handle pointer is NULL\n");
+		return -EINVAL;
+	}
+
+	/* create and put handle in the table */
+	ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+	if (ret < 0)
+		pr_err("Error: %s get handle fail\n", identifier);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+	int ret = 0, idx;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (ops) {
+	case CAM_SMMU_ATTACH: {
+		ret = cam_smmu_attach(idx);
+		break;
+	}
+	case CAM_SMMU_DETACH: {
+		ret = cam_smmu_detach_device(idx);
+		break;
+	}
+	case CAM_SMMU_VOTE:
+	case CAM_SMMU_DEVOTE:
+	default:
+		pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr)
+{
+	unsigned long nents = virt_len / phys_len;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	size_t unmapped;
+	dma_addr_t iova = 0;
+	struct scatterlist *sg;
+	int i = 0;
+	int rc;
+	struct iommu_domain *domain = NULL;
+	struct page *page;
+	struct sg_table *table = NULL;
+
+	CDBG("%s: nents = %lu, idx = %d, virt_len  = %zx\n",
+		__func__, nents, idx, virt_len);
+	CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
+		__func__, phys_len, iommu_dir, virt_addr);
+
+	/*
+	 * This table will go inside the 'mapping' structure
+	 * where it will be held until put_scratch_buffer is called
+	 */
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		rc = -ENOMEM;
+		goto err_table_alloc;
+	}
+
+	rc = sg_alloc_table(table, nents, GFP_KERNEL);
+	if (rc < 0) {
+		rc = -EINVAL;
+		goto err_sg_alloc;
+	}
+
+	page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+	if (!page) {
+		rc = -ENOMEM;
+		goto err_page_alloc;
+	}
+
+	/* Now we create the sg list */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, phys_len, 0);
+
+
+	/* Get the domain from within our cb_set struct and map it*/
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+	rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		virt_len, &iova);
+
+	if (rc < 0) {
+		pr_err("Could not find valid iova for scratch buffer");
+		goto err_iommu_map;
+	}
+
+	if (iommu_map_sg(domain,
+		iova,
+		table->sgl,
+		table->nents,
+		iommu_dir) != virt_len) {
+		pr_err("iommu_map_sg() failed");
+		goto err_iommu_map;
+	}
+
+	/* Now update our mapping information within the cb_set struct */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_mapping_info;
+	}
+
+	mapping_info->ion_fd = 0xDEADBEEF;
+	mapping_info->buf = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->table = table;
+	mapping_info->paddr = iova;
+	mapping_info->len = virt_len;
+	mapping_info->iommu_dir = iommu_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->phys_len = phys_len;
+	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
+
+	CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
+		__func__, (void *)mapping_info->paddr,
+		mapping_info->len, mapping_info->phys_len);
+
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	*virt_addr = (dma_addr_t)iova;
+
+	CDBG("%s: mapped virtual address = %lx\n", __func__,
+		(unsigned long)*virt_addr);
+	return 0;
+
+err_mapping_info:
+	unmapped = iommu_unmap(domain, iova,  virt_len);
+	if (unmapped != virt_len)
+		pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+err_iommu_map:
+	__free_pages(page, get_order(phys_len));
+err_page_alloc:
+	sg_free_table(table);
+err_sg_alloc:
+	kfree(table);
+err_table_alloc:
+	return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc = 0;
+	size_t unmapped;
+	struct iommu_domain *domain =
+		iommu_cb_set.cb_info[idx].mapping->domain;
+	struct scratch_mapping *scratch_map =
+		&iommu_cb_set.cb_info[idx].scratch_map;
+
+	if (!mapping_info->table) {
+		pr_err("Error: Invalid params: dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		return -EINVAL;
+	}
+
+	/* Clean up the mapping_info struct from the list */
+	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+	if (unmapped != mapping_info->len)
+		pr_err("Unmapped only %zx instead of %zx",
+			unmapped, mapping_info->len);
+
+	rc = cam_smmu_free_scratch_va(scratch_map,
+		mapping_info->paddr,
+		mapping_info->len);
+	if (rc < 0) {
+		pr_err("Error: Invalid iova while freeing scratch buffer\n");
+		rc = -EINVAL;
+	}
+
+	__free_pages(sg_page(mapping_info->table->sgl),
+			get_order(mapping_info->phys_len));
+	sg_free_table(mapping_info->table);
+	kfree(mapping_info->table);
+	list_del_init(&mapping_info->list);
+
+	kfree(mapping_info);
+	mapping_info = NULL;
+
+	return rc;
+}
+
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len)
+{
+	int idx, rc;
+	unsigned int iommu_dir;
+
+	if (!paddr_ptr || !virt_len || !phys_len) {
+		pr_err("Error: Input pointer or lengths invalid\n");
+		return -EINVAL;
+	}
+
+	if (virt_len < phys_len) {
+		pr_err("Error: virt_len is less than phys_len\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+	if (iommu_dir == IOMMU_INVALID_DIR) {
+		pr_err("Error: translate direction failed. dir = %d\n", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		pr_err("Error: Context bank does not support scratch bufs\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
+		__func__, handle, idx, dir);
+	CDBG("%s: virt_len = %zx, phys_len = %zx\n",
+		__func__, virt_len, phys_len);
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+				iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+		pr_err("Requested scratch buffer length not page aligned\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, phys_len)) {
+		pr_err("Requested virt length not aligned with phys length\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+		virt_len,
+		phys_len,
+		iommu_dir,
+		paddr_ptr);
+	if (rc < 0)
+		pr_err("Error: mapping or add list fail\n");
+
+error:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr)
+{
+	int idx;
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		pr_err("Error: Context bank does not support scratch buffers\n");
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	/* Based on virtual address and index, we can find mapping info
+	 * of the scratch buffer
+	 */
+	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params\n");
+		rc = -ENODEV;
+		goto handle_err;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+	if (rc < 0) {
+		pr_err("Error: unmap or remove list fail\n");
+		goto handle_err;
+	}
+
+handle_err:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+int cam_smmu_map_sec_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	/* not implemented yet */
+	return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_map_sec_iova);
+
+int cam_smmu_map_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		pr_err("Input pointers are invalid\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	if (region_id != CAM_SMMU_REGION_SHARED)
+		*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		pr_err("translate direction failed. dir = %d\n", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+				iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
+		len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CDBG("ion_fd:%d already in the list, give same addr back",
+				 ion_fd);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		pr_err("mapping or add list fail\n");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_iova);
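An illustrative caller-side sketch of the non-secure mapping flow exported above. The context-bank name "cam_example_cb" is an assumption; a real client passes the name of its qcom,msm-cam-smmu-cb node, and ion_fd is the client's dma-buf fd.

/*
 * Illustrative only: get a handle, attach, map an IO-region buffer,
 * then tear everything down in reverse order.
 */
static int cam_smmu_map_example(int ion_fd)
{
	int hdl, rc;
	dma_addr_t iova = 0;
	size_t len = 0;

	rc = cam_smmu_get_handle("cam_example_cb", &hdl);
	if (rc < 0)
		return rc;

	rc = cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
	if (rc < 0)
		goto destroy;

	rc = cam_smmu_map_iova(hdl, ion_fd, CAM_SMMU_MAP_RW,
		&iova, &len, CAM_SMMU_REGION_IO);
	if (rc < 0)
		goto detach;

	/* ... program iova/len into the hardware ... */

	cam_smmu_unmap_iova(hdl, ion_fd, CAM_SMMU_REGION_IO);
detach:
	cam_smmu_ops(hdl, CAM_SMMU_DETACH);
destroy:
	cam_smmu_destroy_handle(hdl);
	return rc;
}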
+
+
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		pr_err("Error: Input pointers are invalid\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CDBG("ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_iova);
+
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd)
+{
+	/* not implemented yet */
+	return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_sec_iova);
+
+int cam_smmu_unmap_iova(int handle,
+	int ion_fd,
+	enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CDBG("SMMU: removing buffer idx = %d\n", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		pr_err("Error: unmap or remove list fail\n");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_iova);
+
+int cam_smmu_put_iova(int handle, int ion_fd)
+{
+	int idx;
+	int rc = 0;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_iova);
+
+int cam_smmu_destroy_handle(int handle)
+{
+	int idx;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+		pr_err("Client %s buffer list is not clean!\n",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_list(idx);
+		cam_smmu_clean_buffer_list(idx);
+	}
+
+	iommu_cb_set.cb_info[idx].cb_count = 0;
+	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb)
+{
+	arm_iommu_detach_device(cb->dev);
+
+	if (cb->io_support && cb->mapping) {
+		arm_iommu_release_mapping(cb->mapping);
+		cb->mapping = NULL;
+	}
+
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+	int i = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++)
+		cam_smmu_deinit_cb(&iommu_cb_set.cb_info[i]);
+
+	devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+	iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+	struct device *dev)
+{
+	int rc = 0;
+
+	if (!cb || !dev) {
+		pr_err("Error: invalid input params\n");
+		return -EINVAL;
+	}
+
+	cb->dev = dev;
+	cb->is_fw_allocated = false;
+
+	/* Create a pool with 4K granularity for supporting shared memory */
+	if (cb->shared_support) {
+		cb->shared_mem_pool = gen_pool_create(
+			SHARED_MEM_POOL_GRANULARITY, -1);
+
+		if (!cb->shared_mem_pool)
+			return -ENOMEM;
+
+		rc = gen_pool_add(cb->shared_mem_pool,
+			cb->shared_info.iova_start,
+			cb->shared_info.iova_len,
+			-1);
+
+		CDBG("Shared mem start->%lX\n",
+			(unsigned long)cb->shared_info.iova_start);
+		CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+
+		if (rc) {
+			pr_err("Genpool chunk creation failed\n");
+			gen_pool_destroy(cb->shared_mem_pool);
+			cb->shared_mem_pool = NULL;
+			return rc;
+		}
+	}
+
+	if (cb->scratch_buf_support) {
+		rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+			cb->scratch_info.iova_start,
+			cb->scratch_info.iova_len,
+			0);
+		if (rc < 0) {
+			pr_err("Error: failed to create scratch map\n");
+			rc = -ENODEV;
+			goto end;
+		}
+	}
+
+	/* create a virtual mapping */
+	if (cb->io_support) {
+		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
+			cb->io_info.iova_start, cb->io_info.iova_len);
+		if (IS_ERR(cb->mapping)) {
+			pr_err("Error: create mapping Failed\n");
+			rc = -ENODEV;
+			goto end;
+		}
+	} else {
+		pr_err("Context bank does not have IO region\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	return rc;
+end:
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+
+	return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+	struct device_node *domains_child_node = NULL;
+
+	if (!dev) {
+		pr_err("Error: Invalid device\n");
+		return -ENODEV;
+	}
+
+	iommu_cb_set.cb_num = 0;
+
+	/* traverse through all the child nodes and increment the cb count */
+	for_each_available_child_of_node(dev->of_node, domains_child_node) {
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,msm-cam-smmu-cb"))
+			iommu_cb_set.cb_num++;
+
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,qsmmu-cam-cb"))
+			iommu_cb_set.cb_num++;
+	}
+
+	if (iommu_cb_set.cb_num == 0) {
+		pr_err("Error: no context banks present\n");
+		return -ENOENT;
+	}
+
+	/* allocate memory for the context banks */
+	iommu_cb_set.cb_info = devm_kzalloc(dev,
+		iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+		GFP_KERNEL);
+
+	if (!iommu_cb_set.cb_info) {
+		pr_err("Error: cannot allocate context banks\n");
+		return -ENOMEM;
+	}
+
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+	iommu_cb_set.cb_init_count = 0;
+
+	CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+	return 0;
+}
+
+static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
+	struct cam_context_bank_info *cb)
+{
+	int rc = 0;
+	struct device_node *mem_map_node = NULL;
+	struct device_node *child_node = NULL;
+	const char *region_name;
+	int num_regions = 0;
+
+	if (!of_node || !cb) {
+		pr_err("Invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	if (!mem_map_node) {
+		pr_err("iova-mem-map not present\n");
+		return -EINVAL;
+	}
+
+	for_each_available_child_of_node(mem_map_node, child_node) {
+		uint32_t region_start;
+		uint32_t region_len;
+		uint32_t region_id;
+
+		num_regions++;
+		rc = of_property_read_string(child_node,
+			"iova-region-name", &region_name);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			pr_err("IOVA region not found\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-start", &region_start);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-start\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-len", &region_len);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-len\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-id", &region_id);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-id\n");
+			return -EINVAL;
+		}
+
+		switch (region_id) {
+		case CAM_SMMU_REGION_FIRMWARE:
+			cb->firmware_support = 1;
+			cb->firmware_info.iova_start = region_start;
+			cb->firmware_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SHARED:
+			cb->shared_support = 1;
+			cb->shared_info.iova_start = region_start;
+			cb->shared_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SCRATCH:
+			cb->scratch_buf_support = 1;
+			cb->scratch_info.iova_start = region_start;
+			cb->scratch_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_IO:
+			cb->io_support = 1;
+			cb->io_info.iova_start = region_start;
+			cb->io_info.iova_len = region_len;
+			break;
+		default:
+			pr_err("Incorrect region id present in DT file: %d\n",
+				region_id);
+		}
+
+		CDBG("Found label -> %s\n", cb->name);
+		CDBG("Found region -> %s\n", region_name);
+		CDBG("region_start -> %X\n", region_start);
+		CDBG("region_len -> %X\n", region_len);
+		CDBG("region_id -> %X\n", region_id);
+	}
+	of_node_put(mem_map_node);
+
+	if (!num_regions) {
+		pr_err("No memory regions found, at least one needed\n");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+	enum cam_iommu_type type)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb;
+	struct device *ctx = NULL;
+
+	if (!dev) {
+		pr_err("Error: Invalid device\n");
+		return -ENODEV;
+	}
+
+	/* check the bounds */
+	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+		pr_err("Error: populating more context banks than allocated\n");
+		rc = -EBADHANDLE;
+		goto cb_init_fail;
+	}
+
+	/* read the context bank from cb set */
+	cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+	/* set the name of the context bank */
+	rc = of_property_read_string(dev->of_node, "label", &cb->name);
+	if (rc < 0) {
+		pr_err("Error: failed to read label from sub device\n");
+		goto cb_init_fail;
+	}
+
+	rc = cam_smmu_get_memory_regions_info(dev->of_node,
+		cb);
+	if (rc < 0) {
+		pr_err("Error: failed to get region info\n");
+		return rc;
+	}
+
+	/* set up the iommu mapping for the context bank */
+	if (type == CAM_QSMMU) {
+		pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+		return -ENODEV;
+	}
+
+	ctx = dev;
+	CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+
+	rc = cam_smmu_setup_cb(cb, ctx);
+	if (rc < 0) {
+		pr_err("Error: failed to setup cb : %s\n", cb->name);
+		goto cb_init_fail;
+	}
+
+	if (cb->io_support && cb->mapping)
+		iommu_set_fault_handler(cb->mapping->domain,
+			cam_smmu_iommu_fault_handler,
+			(void *)cb->name);
+
+	/* increment count to next bank */
+	iommu_cb_set.cb_init_count++;
+
+	CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+
+cb_init_fail:
+	return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+		rc = cam_alloc_smmu_context_banks(dev);
+		if (rc < 0) {
+			pr_err("Error: allocating context banks\n");
+			return -ENOMEM;
+		}
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+		if (rc < 0) {
+			pr_err("Error: populating context banks\n");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+		if (rc < 0) {
+			pr_err("Error: populating context banks\n");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-fw-dev")) {
+		icp_fw.fw_dev = &pdev->dev;
+		icp_fw.fw_kva = NULL;
+		icp_fw.fw_dma_hdl = 0;
+		return rc;
+	}
+
+	/* probe through all the subdevices */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+				NULL, &pdev->dev);
+	if (rc < 0) {
+		pr_err("Error: populating devices\n");
+	} else {
+		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+		mutex_init(&iommu_cb_set.payload_list_lock);
+		INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+	}
+
+	return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+	/* release all the context banks and memory allocated */
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+		cam_smmu_release_cb(pdev);
+	return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+	.probe = cam_smmu_probe,
+	.remove = cam_smmu_remove,
+	.driver = {
+		.name = "msm_cam_smmu",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_smmu_dt_match,
+	},
+};
+
+static int __init cam_smmu_init_module(void)
+{
+	return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+	platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
new file mode 100644
index 0000000..76e9135
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -0,0 +1,255 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+
+/* Enum for possible CAM SMMU operations */
+enum cam_smmu_ops_param {
+	CAM_SMMU_ATTACH,
+	CAM_SMMU_DETACH,
+	CAM_SMMU_VOTE,
+	CAM_SMMU_DEVOTE,
+	CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+	CAM_SMMU_MAP_READ,
+	CAM_SMMU_MAP_WRITE,
+	CAM_SMMU_MAP_RW,
+	CAM_SMMU_MAP_INVALID
+};
+
+enum cam_smmu_region_id {
+	CAM_SMMU_REGION_FIRMWARE,
+	CAM_SMMU_REGION_SHARED,
+	CAM_SMMU_REGION_SCRATCH,
+	CAM_SMMU_REGION_IO
+};
+
+/**
+ * @brief           : Gets an smmu handle
+ *
+ * @param identifier: Unique identifier to be used by clients, which they
+ *                    should obtain from the device tree. The CAM SMMU driver
+ *                    does not enforce how this string is obtained and only
+ *                    validates it against the list of permitted identifiers
+ * @param handle_ptr: Based on the identifier, the CAM SMMU driver will
+ *                    fill in the handle pointed to by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @brief       : Performs IOMMU operations
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op    : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ *                or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @brief       : Maps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @param dir      : Mapping direction, which will translate to
+ *                   DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @param dma_addr : Pointer to the IOVA where the mapped address will be
+ *                   returned if region_id is CAM_SMMU_REGION_IO. If region_id
+ *                   is CAM_SMMU_REGION_SHARED, dma_addr is used as an input
+ *                   parameter which specifies the CPU virtual address to map.
+ * @param len_ptr  : Length of the mapped buffer, returned by the CAM SMMU
+ *                   driver.
+ * @param region_id: Memory region (CAM_SMMU_REGION_*) in which the buffer
+ *                   should be mapped.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
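+/*
+ * Example usage (illustrative sketch only): the typical client flow for
+ * mapping an ION buffer into the IO region. The "cam_client" identifier and
+ * ion_fd are placeholders, and error handling is abbreviated.
+ *
+ *	int hdl, rc;
+ *	dma_addr_t iova;
+ *	size_t len;
+ *
+ *	rc = cam_smmu_get_handle("cam_client", &hdl);
+ *	if (!rc)
+ *		rc = cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
+ *	if (!rc)
+ *		rc = cam_smmu_map_iova(hdl, ion_fd, CAM_SMMU_MAP_RW,
+ *			&iova, &len, CAM_SMMU_REGION_IO);
+ *
+ *	... program iova into the hardware ...
+ *
+ *	cam_smmu_unmap_iova(hdl, ion_fd, CAM_SMMU_REGION_IO);
+ *	cam_smmu_ops(hdl, CAM_SMMU_DETACH);
+ *	cam_smmu_destroy_handle(hdl);
+ */
+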
+/**
+ * @brief       : Unmaps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @param region_id: Memory region from which the buffer should be unmapped.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_iova(int handle,
+	int ion_fd,
+	enum cam_smmu_region_id region_id);
+
+/**
+ * @brief          : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and to each
+ * other; otherwise -EINVAL is returned.
+ *
+ * -EINVAL is also returned if virt_len is less than phys_len.
+ *
+ * Passing a phys_len that is too large may also cause failure if that much
+ * memory is not available for a physically contiguous allocation.
+ *
+ * @param handle   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param dir      : Direction of mapping which will translate to IOMMU_READ
+ *                   IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ *                   able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len);
+
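+/*
+ * Example usage (illustrative sketch only): allocating and freeing a scratch
+ * buffer. 'hdl' is a handle obtained earlier via cam_smmu_get_handle(); the
+ * 2 MB virtual / 1 MB physical lengths are arbitrary PAGE_SIZE aligned values.
+ *
+ *	dma_addr_t scratch_iova;
+ *	int rc;
+ *
+ *	rc = cam_smmu_get_scratch_iova(hdl, CAM_SMMU_MAP_RW,
+ *		&scratch_iova, SZ_2M, SZ_1M);
+ *	if (!rc) {
+ *		... program scratch_iova into the hardware ...
+ *		cam_smmu_put_scratch_iova(hdl, scratch_iova);
+ *	}
+ */
+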
+/**
+ * @brief          : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle   : Handle to identify the CAM SMMU client (IFE, ICP, etc.)
+ * @param paddr    : Device virtual address of client's scratch buffer that
+ *                   will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr);
+
+/**
+ * @brief       : Destroys an smmu handle
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @brief       : Finds index by handle in the smmu client table
+ *
+ * @param hdl   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @brief       : Registers smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Callback invoked on an IOMMU page fault
+ * @param token: Opaque pointer passed back to the callback on a page fault
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+	void (*client_page_fault_handler)(struct iommu_domain *,
+	struct device *, unsigned long,
+	int, void*), void *token);
+
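+/*
+ * Example usage (illustrative sketch only): registering a page fault callback.
+ * my_cam_fault_handler() and my_token are placeholders; the callback signature
+ * mirrors the prototype above.
+ *
+ *	static void my_cam_fault_handler(struct iommu_domain *domain,
+ *		struct device *dev, unsigned long iova, int flags, void *token)
+ *	{
+ *		pr_err("camera smmu page fault at iova 0x%lx\n", iova);
+ *	}
+ *
+ *	cam_smmu_reg_client_page_fault_handler(hdl, my_cam_fault_handler,
+ *		my_token);
+ */
+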
+/**
+ * @brief Maps memory from an ION fd into IOVA space
+ *
+ * @param handle: SMMU handle identifying the context bank to map to
+ * @param ion_fd: ION fd of the memory to map
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps memory from context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of memory to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd);
+
+/**
+ * @brief Maps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to map securely
+ * @param dir: DMA Direction for the mapping
+ * @param dma_addr: Returned IOVA address after mapping
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_sec_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd);
+
+/**
+ * @brief Allocates firmware for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated firmware
+ * @param kvaddr: CPU mapped address of allocated firmware
+ * @param len: Length of allocated firmware memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uint64_t *kvaddr,
+	size_t *len);
+
+/**
+ * @brief Deallocates firmware memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
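+
+/*
+ * Example usage (illustrative sketch only): pairing firmware allocation with
+ * release. 'icp_hdl' is a placeholder handle for the ICP context bank.
+ *
+ *	dma_addr_t fw_iova;
+ *	uint64_t fw_kva;
+ *	size_t fw_len;
+ *
+ *	if (!cam_smmu_alloc_firmware(icp_hdl, &fw_iova, &fw_kva, &fw_len)) {
+ *		... load the firmware image at fw_kva, hand fw_iova to ICP ...
+ *		cam_smmu_dealloc_firmware(icp_hdl);
+ *	}
+ */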
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 4f5bf87..ecc62c8 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -57,6 +57,48 @@
 	return 0;
 }
 
+uint32_t cam_sync_util_get_group_object_state(struct sync_table_row *table,
+	uint32_t *sync_objs,
+	uint32_t num_objs)
+{
+	int i;
+	struct sync_table_row *child_row = NULL;
+	int success_count = 0;
+	int active_count = 0;
+
+	if (!table || !sync_objs)
+		return CAM_SYNC_STATE_SIGNALED_ERROR;
+
+	/*
+	 * We need to arrive at the state of the merged object based on
+	 * counts of error, active and success states of all children objects
+	 */
+	for (i = 0; i < num_objs; i++) {
+		child_row = table + sync_objs[i];
+		switch (child_row->state) {
+		case CAM_SYNC_STATE_SIGNALED_ERROR:
+			return CAM_SYNC_STATE_SIGNALED_ERROR;
+		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+			success_count++;
+			break;
+		case CAM_SYNC_STATE_ACTIVE:
+			active_count++;
+			break;
+		default:
+			pr_err("Invalid state of child object during merge\n");
+			return CAM_SYNC_STATE_SIGNALED_ERROR;
+		}
+	}
+
+	if (active_count)
+		return CAM_SYNC_STATE_ACTIVE;
+
+	if (success_count == num_objs)
+		return CAM_SYNC_STATE_SIGNALED_SUCCESS;
+
+	return CAM_SYNC_STATE_SIGNALED_ERROR;
+}
+
 int cam_sync_init_group_object(struct sync_table_row *table,
 	uint32_t idx,
 	uint32_t *sync_objs,
@@ -113,12 +155,16 @@
 
 	row->type = CAM_SYNC_TYPE_GROUP;
 	row->sync_id = idx;
-	row->state = CAM_SYNC_STATE_ACTIVE;
+	row->state = cam_sync_util_get_group_object_state(table,
+		sync_objs, num_objs);
 	row->remaining = num_objs;
 	init_completion(&row->signaled);
 	INIT_LIST_HEAD(&row->callback_list);
 	INIT_LIST_HEAD(&row->user_payload_list);
 
+	if (row->state != CAM_SYNC_STATE_ACTIVE)
+		complete_all(&row->signaled);
+
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 	return 0;
 }
@@ -208,6 +254,11 @@
 	int i;
 	struct sync_table_row *row = NULL;
 
+	if (num_objs <= 1) {
+		pr_err("Single object merge is not allowed\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < num_objs; i++) {
 		row = sync_dev->sync_table + sync_obj[i];
 		spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 3bf6ce0..9194b44 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,7 +96,6 @@
  * @SDE_CAPS_R1_WB: MDSS V1.x WB block
  * @SDE_CAPS_R3_WB: MDSS V3.x WB block
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
- * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
  */
@@ -105,7 +104,6 @@
 	SDE_CAPS_R3_WB,
 	SDE_CAPS_R3_1P5_DOWNSCALE,
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
-	SDE_CAPS_MIN_BUS_VOTE,
 	SDE_CAPS_SBUF_1,
 	SDE_CAPS_UBWC_2,
 	SDE_CAPS_MAX,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 9a28700..30fda07 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -25,6 +25,7 @@
 #include <linux/msm-bus-board.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dma-direction.h>
+#include <linux/sde_rsc.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 #include <asm/cacheflush.h>
@@ -293,14 +294,13 @@
 
 	SDEROT_DBG("core_clk %lu\n", total_clk_rate);
 	ATRACE_INT("core_clk", total_clk_rate);
-	sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_ROT_CORE);
+	sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_MDSS_ROT);
 
 	return 0;
 }
 
 static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	int ret;
 
 	if (WARN_ON(mgr->regulator_enable == on)) {
@@ -311,7 +311,7 @@
 	SDEROT_EVTLOG(on);
 	SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
 
-	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && on) {
+	if (on) {
 		mgr->minimum_bw_vote = mgr->enable_bw_vote;
 		sde_rotator_update_perf(mgr);
 	}
@@ -319,8 +319,13 @@
 	if (mgr->ops_hw_pre_pmevent)
 		mgr->ops_hw_pre_pmevent(mgr, on);
 
-	ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
-		mgr->module_power.num_vreg, on);
+	if (mgr->rsc_client)
+		ret = sde_rsc_client_state_update(mgr->rsc_client,
+				on ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE,
+				NULL, -1);
+	else
+		ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
+			mgr->module_power.num_vreg, on);
 	if (ret) {
 		SDEROT_WARN("Rotator regulator failed to %s\n",
 			on ? "enable" : "disable");
@@ -330,7 +335,7 @@
 	if (mgr->ops_hw_post_pmevent)
 		mgr->ops_hw_post_pmevent(mgr, on);
 
-	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && !on) {
+	if (!on) {
 		mgr->minimum_bw_vote = 0;
 		sde_rotator_update_perf(mgr);
 	}
@@ -407,13 +412,13 @@
 			if (ret)
 				goto error_mdss_axi;
 			ret = sde_rotator_enable_clk(mgr,
-						SDE_ROTATOR_CLK_ROT_CORE);
-			if (ret)
-				goto error_rot_core;
-			ret = sde_rotator_enable_clk(mgr,
 						SDE_ROTATOR_CLK_MDSS_ROT);
 			if (ret)
 				goto error_mdss_rot;
+			ret = sde_rotator_enable_clk(mgr,
+						SDE_ROTATOR_CLK_MDSS_ROT_SUB);
+			if (ret)
+				goto error_rot_sub;
 
 			/* Active+Sleep */
 			msm_bus_scale_client_update_context(
@@ -421,8 +426,9 @@
 				mgr->data_bus.curr_bw_uc_idx);
 			trace_rot_bw_ao_as_context(0);
 		} else {
+			sde_rotator_disable_clk(mgr,
+					SDE_ROTATOR_CLK_MDSS_ROT_SUB);
 			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
-			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
 			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
 			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
 			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AXI);
@@ -438,9 +444,9 @@
 	}
 
 	return ret;
+error_rot_sub:
+	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
 error_mdss_rot:
-	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
-error_rot_core:
 	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
 error_mdss_axi:
 	sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
@@ -551,6 +557,12 @@
 	if (!input)
 		dir = DMA_FROM_DEVICE;
 
+	if (buffer->plane_count > SDE_ROT_MAX_PLANES) {
+		SDEROT_ERR("buffer plane_count exceeds MAX_PLANE limit:%d\n",
+				buffer->plane_count);
+		return -EINVAL;
+	}
+
 	data->sbuf = buffer->sbuf;
 	data->scid = buffer->scid;
 	data->writeback = buffer->writeback;
@@ -2731,11 +2743,19 @@
 			sde_rotator_search_dt_clk(pdev, mgr, "axi_clk",
 				SDE_ROTATOR_CLK_MDSS_AXI, true) ||
 			sde_rotator_search_dt_clk(pdev, mgr, "rot_core_clk",
-				SDE_ROTATOR_CLK_ROT_CORE, true) ||
-			sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
-				SDE_ROTATOR_CLK_MDSS_ROT, true))
+				SDE_ROTATOR_CLK_MDSS_ROT, false))
 		rc = -EINVAL;
 
+	/*
+	 * If 'MDSS_ROT' is already present, place 'rot_clk' under
+	 * MDSS_ROT_SUB. Otherwise, place it directly into MDSS_ROT.
+	 */
+	if (sde_rotator_get_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT))
+		rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
+				SDE_ROTATOR_CLK_MDSS_ROT_SUB, true);
+	else
+		rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
+				SDE_ROTATOR_CLK_MDSS_ROT, true);
 clk_err:
 	return rc;
 }
@@ -2766,9 +2786,21 @@
 {
 	int ret;
 
-	ret = sde_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
-	if (ret)
+	mgr->rsc_client = sde_rsc_client_create(
+			SDE_RSC_INDEX, "sde_rotator_core", false);
+	if (IS_ERR(mgr->rsc_client)) {
+		ret = PTR_ERR(mgr->rsc_client);
+		pr_err("rsc client create returned %d\n", ret);
+		mgr->rsc_client = NULL;
 		return ret;
+	}
+
+	if (!mgr->rsc_client) {
+		ret = sde_rotator_get_dt_vreg_data(
+				&pdev->dev, &mgr->module_power);
+		if (ret)
+			return ret;
+	}
 
 	ret = sde_rotator_register_clk(pdev, mgr);
 	if (ret)
@@ -2788,9 +2820,15 @@
 {
 	struct platform_device *pdev = mgr->pdev;
 
-	sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
 	sde_rotator_unregister_clk(mgr);
 	sde_rotator_bus_scale_unregister(mgr);
+
+	if (mgr->rsc_client) {
+		sde_rsc_client_destroy(mgr->rsc_client);
+		mgr->rsc_client = NULL;
+	} else {
+		sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	}
 }
 
 int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 819f57b..0051e96 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -125,7 +125,7 @@
 enum sde_rotator_clk_type {
 	SDE_ROTATOR_CLK_MDSS_AHB,
 	SDE_ROTATOR_CLK_MDSS_AXI,
-	SDE_ROTATOR_CLK_ROT_CORE,
+	SDE_ROTATOR_CLK_MDSS_ROT_SUB,
 	SDE_ROTATOR_CLK_MDSS_ROT,
 	SDE_ROTATOR_CLK_MNOC_AHB,
 	SDE_ROTATOR_CLK_GCC_AHB,
@@ -373,6 +373,7 @@
  * @reg_bus: register bus configuration state
  * @module_power: power/clock configuration state
  * @regulator_enable: true if foot switch is enabled; false otherwise
+ * @rsc_client: pointer to rsc client handle
  * @res_ref_cnt: reference count of how many times resource is requested
  * @rot_enable_clk_cnt: reference count of how many times clock is requested
  * @rot_clk: array of rotator and periphery clocks
@@ -417,6 +418,8 @@
 	struct sde_module_power module_power;
 	bool regulator_enable;
 
+	struct sde_rsc_client *rsc_client;
+
 	int res_ref_cnt;
 	int rot_enable_clk_cnt;
 	struct sde_rot_clk *rot_clk;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 15ecc10..90b7194 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -438,6 +438,8 @@
 {
 	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
+	struct sde_rotator_request *request;
+	struct list_head *curr, *next;
 	int i;
 	int ret;
 
@@ -458,6 +460,21 @@
 		sde_rot_mgr_lock(rot_dev->mgr);
 		sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
 		sde_rot_mgr_unlock(rot_dev->mgr);
+		list_for_each_safe(curr, next, &ctx->pending_list) {
+			request = container_of(curr, struct sde_rotator_request,
+						list);
+
+			SDEDEV_DBG(rot_dev->dev, "cancel request s:%d\n",
+					ctx->session_id);
+			mutex_unlock(q->lock);
+			cancel_work_sync(&request->submit_work);
+			cancel_work_sync(&request->retire_work);
+			mutex_lock(q->lock);
+			spin_lock(&ctx->list_lock);
+			list_del_init(&request->list);
+			list_add_tail(&request->list, &ctx->retired_list);
+			spin_unlock(&ctx->list_lock);
+		}
 	}
 
 	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 8f2746d..c147b0b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -52,8 +52,8 @@
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
-#define DEFAULT_UBWC_MALSIZE	1
-#define DEFAULT_UBWC_SWIZZLE	1
+#define DEFAULT_UBWC_MALSIZE	0
+#define DEFAULT_UBWC_SWIZZLE	0
 
 #define DEFAULT_MAXLINEWIDTH	4096
 
@@ -2366,7 +2366,6 @@
 	/* features exposed via mdss h/w version */
 	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
-		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
@@ -2597,6 +2596,11 @@
 	}
 
 	if ((src_w != dst_w) || (src_h != dst_h)) {
+		if (!dst_w || !dst_h) {
+			SDEROT_DBG("zero output width/height not supported\n");
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
 		if ((src_w % dst_w) || (src_h % dst_h)) {
 			SDEROT_DBG("non integral scale not support\n");
 			ret = -EINVAL;
@@ -3078,9 +3082,9 @@
 		goto error_hw_rev_init;
 
 	/* set rotator CBCR to shutoff memory/periphery on clock off.*/
-	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
+	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
 			CLKFLAG_NORETAIN_MEM);
-	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
+	clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
 			CLKFLAG_NORETAIN_PERIPH);
 
 	mdata->sde_rot_hw = rot;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index ac6ded0..9331c94 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -32,7 +32,6 @@
 		HFI_H264_PROFILE_CONSTRAINED_BASE,
 	[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
 		HFI_H264_PROFILE_CONSTRAINED_HIGH,
-	[ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
 };
 
 static int entropy_mode[] = {
@@ -920,6 +919,8 @@
 	pkt->session_id = hash32_ptr(session);
 	pkt->num_properties = 1;
 
+	dprintk(VIDC_DBG, "Setting HAL Property = 0x%x\n", ptype);
+
 	switch (ptype) {
 	case HAL_CONFIG_FRAME_RATE:
 	{
@@ -1273,6 +1274,7 @@
 		hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
 			hal_quant->qpb << 16;
 		hfi->layer_id = hal_quant->layer_id;
+		hfi->enable = hal_quant->enable;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
 		break;
 	}
@@ -1477,22 +1479,22 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_VUI_TIMING_INFO:
+	case HAL_PARAM_VENC_VUI_TIMING_INFO:
 	{
-		struct hfi_h264_vui_timing_info *hfi;
-		struct hal_h264_vui_timing_info *timing_info = pdata;
+		struct hfi_vui_timing_info *hfi;
+		struct hal_vui_timing_info *timing_info = pdata;
 
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+			HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO;
 
-		hfi = (struct hfi_h264_vui_timing_info *)&pkt->
+		hfi = (struct hfi_vui_timing_info *)&pkt->
 			rg_property_data[1];
 		hfi->enable = timing_info->enable;
 		hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
 		hfi->time_scale = timing_info->time_scale;
 
 		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_h264_vui_timing_info);
+			sizeof(struct hfi_vui_timing_info);
 		break;
 	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index c82db74..c5c4269 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -135,6 +135,14 @@
 	return msm_vidc_s_ext_ctrl((void *)vidc_inst, a);
 }
 
+int msm_v4l2_g_ext_ctrl(struct file *file, void *fh,
+					struct v4l2_ext_controls *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_g_ext_ctrl((void *)vidc_inst, a);
+}
+
 int msm_v4l2_reqbufs(struct file *file, void *fh,
 				struct v4l2_requestbuffers *b)
 {
@@ -250,6 +258,7 @@
 	.vidioc_g_ctrl = msm_v4l2_g_ctrl,
 	.vidioc_queryctrl = msm_v4l2_queryctrl,
 	.vidioc_s_ext_ctrls = msm_v4l2_s_ext_ctrl,
+	.vidioc_g_ext_ctrls = msm_v4l2_g_ext_ctrl,
 	.vidioc_subscribe_event = msm_v4l2_subscribe_event,
 	.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event,
 	.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 3d3b7e9..c42d7aa 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -23,7 +23,6 @@
 #define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
 #define MB_SIZE_IN_PIXEL (16 * 16)
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 
 static const char *const mpeg_video_stream_format[] = {
@@ -220,13 +219,9 @@
 		.name = "VP8 Profile Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
-		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3,
 		.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
-		.menu_skip_mask = ~(
-			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
-			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
-			(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
-		),
+		.menu_skip_mask = 0,
 		.qmenu = vp8_profile_level,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
@@ -360,7 +355,7 @@
 		.name = "Set Decoder Operating rate",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = MAX_OPERATING_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = OPERATING_FRAME_RATE_STEP,
 	},
@@ -368,17 +363,6 @@
 
 #define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
 
-static u32 get_frame_size_nv12(int plane,
-					u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
 static u32 get_frame_size_compressed_full_yuv(int plane,
 					u32 max_mbs_per_frame, u32 size_per_mb)
 {
@@ -391,11 +375,6 @@
 	return (max_mbs_per_frame * size_per_mb * 3/2)/2;
 }
 
-static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
-}
-
 static u32 get_frame_size(struct msm_vidc_inst *inst,
 					const struct msm_vidc_format *fmt,
 					int fmt_type, int plane)
@@ -446,7 +425,7 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
 		.description = "UBWC Y/CbCr 4:2:0 10bit",
 		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
-		.get_frame_size = get_frame_size_nv12_ubwc_10bit,
+		.get_frame_size = get_frame_size_tp10_ubwc,
 		.type = CAPTURE_PORT,
 	},
 	{
@@ -681,7 +660,7 @@
 	inst->bufq[OUTPUT_PORT].num_planes = 1;
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->prop.fps = DEFAULT_FPS;
-	inst->operating_rate = 0;
+	inst->clk_data.operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
 			sizeof(struct msm_vidc_format));
 	memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
@@ -968,8 +947,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
 		dprintk(VIDC_DBG,
 			"inst(%pK) operating rate changed from %d to %d\n",
-			inst, inst->operating_rate >> 16, ctrl->val >> 16);
-		inst->operating_rate = ctrl->val;
+			inst, inst->clk_data.operating_rate >> 16,
+				ctrl->val >> 16);
+		inst->clk_data.operating_rate = ctrl->val;
+
+		msm_vidc_update_operating_rate(inst);
+
 		break;
 	default:
 		break;
@@ -980,8 +963,8 @@
 
 	if (!rc && property_id) {
 		dprintk(VIDC_DBG,
-			"Control: HAL property=%#x,ctrl: id=%#x,value=%#x\n",
-			property_id, ctrl->id, ctrl->val);
+			"Control: Name = %s, ID = 0x%x Value = %d\n",
+				ctrl->name, ctrl->id, ctrl->val);
 		rc = call_hfi_op(hdev, session_set_property, (void *)
 				inst->session, property_id, pdata);
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e198d8e..e3d52bf 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -23,7 +23,6 @@
 #define DEFAULT_BIT_RATE 64000
 #define BIT_RATE_STEP 100
 #define DEFAULT_FRAME_RATE 15
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 #define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
 #define MIN_SLICE_BYTE_SIZE 512
@@ -286,6 +285,17 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK,
+		.name = "QP mask for diff frame types",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 7,
+		.default_value = 7,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
 		.name = "Intra Period for B frames",
 		.type = V4L2_CTRL_TYPE_INTEGER,
@@ -422,13 +432,9 @@
 		.name = "VP8 Profile Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
-		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3,
 		.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
-		.menu_skip_mask = ~(
-		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
-		),
+		.menu_skip_mask = 0,
 		.qmenu = vp8_profile_level,
 	},
 	{
@@ -867,7 +873,7 @@
 		.name = "Set Encoder Operating rate",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = MAX_OPERATING_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = OPERATING_FRAME_RATE_STEP,
 	},
@@ -978,31 +984,20 @@
 			(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)),
 		.qmenu = iframe_sizes,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+		.name = "Frame Rate based Rate Control",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+	},
 
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
 
-static u32 get_frame_size_nv12(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
-static u32 get_frame_size_rgba(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
-}
-
-static u32 get_frame_size_nv21(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
-}
-
 static u32 get_frame_size_compressed(int plane, u32 height, u32 width)
 {
 	int sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2;
@@ -1060,6 +1055,13 @@
 		.get_frame_size = get_frame_size_nv21,
 		.type = OUTPUT_PORT,
 	},
+	{
+		.name = "TP10 UBWC 4:2:0",
+		.description = "TP10 UBWC 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
+		.get_frame_size = get_frame_size_tp10_ubwc,
+		.type = OUTPUT_PORT,
+	},
 };
 
 static int msm_venc_set_csc(struct msm_vidc_inst *inst);
@@ -1134,6 +1136,7 @@
 	int max_hierp_layers;
 	int baselayerid = 0;
 	struct hal_video_signal_info signal_info = {0};
+	struct hal_vui_timing_info vui_timing_info = {0};
 	enum hal_iframesize_type iframesize_type = HAL_IFRAMESIZE_TYPE_DEFAULT;
 
 	if (!inst || !inst->core || !inst->core->device) {
@@ -1222,9 +1225,9 @@
 	{
 		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
-		inst->bitrate = ctrl->val;
+		inst->clk_data.bitrate = ctrl->val;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -1246,7 +1249,7 @@
 
 		property_id = HAL_CONFIG_VENC_MAX_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		break;
 	}
@@ -1301,10 +1304,10 @@
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
-		profile_level.profile = msm_comm_v4l2_to_hal(
+		profile_level.profile = HAL_VPX_PROFILE_MAIN;
+		profile_level.level = msm_comm_v4l2_to_hal(
 				V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
 				ctrl->val);
-		profile_level.level = HAL_VPX_PROFILE_UNUSED;
 		pdata = &profile_level;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
@@ -1644,43 +1647,65 @@
 		pdata = &baselayerid;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP: {
-		struct v4l2_ctrl *qpp, *qpb;
+		struct v4l2_ctrl *qpp, *qpb, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
 		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpi = ctrl->val;
 		quant.qpp = qpp->val;
 		quant.qpb = qpb->val;
+		quant.enable = mask->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP: {
-		struct v4l2_ctrl *qpi, *qpb;
+		struct v4l2_ctrl *qpi, *qpb, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
 		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpp = ctrl->val;
 		quant.qpi = qpi->val;
 		quant.qpb = qpb->val;
+		quant.enable = mask->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP: {
-		struct v4l2_ctrl *qpp, *qpi;
+		struct v4l2_ctrl *qpp, *qpi, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
 		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpb = ctrl->val;
 		quant.qpp = qpp->val;
 		quant.qpi = qpi->val;
+		quant.enable = mask->val;
+		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
+		pdata = &quant;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK: {
+		struct v4l2_ctrl *qpi, *qpp, *qpb;
+
+		property_id = HAL_CONFIG_VENC_FRAME_QP;
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+
+		quant.qpi = qpi->val;
+		quant.qpp = qpp->val;
+		quant.qpb = qpb->val;
+		quant.enable = ctrl->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
@@ -1714,8 +1739,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
 		dprintk(VIDC_DBG,
 			"inst(%pK) operating rate changed from %d to %d\n",
-			inst, inst->operating_rate >> 16, ctrl->val >> 16);
-		inst->operating_rate = ctrl->val;
+			inst, inst->clk_data.operating_rate >> 16,
+				ctrl->val >> 16);
+		inst->clk_data.operating_rate = ctrl->val;
+
+		msm_vidc_update_operating_rate(inst);
+
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
 	{
@@ -1798,6 +1827,7 @@
 		else
 			enable.enable = 0;
 		pdata = &enable;
+		inst->clk_data.low_latency_mode = (bool) enable.enable;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
@@ -1825,6 +1855,43 @@
 				ctrl->val);
 		pdata = &iframesize_type;
 		break;
+	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+	{
+		property_id = HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP;
+		enable.enable = ctrl->val;
+		pdata = &enable;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_VUI_TIMING_INFO:
+	{
+		struct v4l2_ctrl *rc_mode;
+		bool cfr = false;
+
+		property_id = HAL_PARAM_VENC_VUI_TIMING_INFO;
+		pdata = &vui_timing_info;
+
+		if (ctrl->val != V4L2_MPEG_VIDC_VIDEO_VUI_TIMING_INFO_ENABLED) {
+			vui_timing_info.enable = 0;
+			break;
+		}
+
+		rc_mode = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+
+		switch (rc_mode->val) {
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+		case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR:
+			cfr = true;
+			break;
+		default:
+			cfr = false;
+		}
+
+		vui_timing_info.enable = 1;
+		vui_timing_info.fixed_frame_rate = cfr;
+		vui_timing_info.time_scale = NSEC_PER_SEC;
+		break;
+	}
 	default:
 		dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id);
 		rc = -ENOTSUPP;
@@ -1835,9 +1902,9 @@
 #undef TRY_GET_CTRL
 
 	if (!rc && property_id) {
-		dprintk(VIDC_DBG, "Control: HAL property=%x,ctrl_value=%d\n",
-				property_id,
-				ctrl->val);
+		dprintk(VIDC_DBG,
+			"Control: Name = %s, ID = 0x%x Value = %d\n",
+				ctrl->name, ctrl->id, ctrl->val);
 		rc = call_hfi_op(hdev, session_set_property,
 				(void *)inst->session, property_id, pdata);
 	}
@@ -1852,7 +1919,7 @@
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
-	u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
+	u32 property_id = 0;
 	void *pdata = NULL;
 	struct msm_vidc_capability *cap = NULL;
 	struct hal_aspect_ratio sar;
@@ -1930,76 +1997,76 @@
 			pdata = &blur_res;
 			break;
 		case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
-			layer_id = control[i].value;
+			qp.layer_id = control[i].value;
+			/* Enable QP for all frame types by default */
+			qp.enable = 7;
+			qp_range.layer_id = control[i].value;
+			bitrate.layer_id = control[i].value;
 			i++;
 			while (i < ctrl->count) {
 			switch (control[i].id) {
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
 				qp.qpi = control[i].value;
-				qp.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
 				qp.qpp = control[i].value;
-				qp.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
 				qp.qpb = control[i].value;
-				qp.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_FRAME_QP;
+				pdata = &qp;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK:
+				qp.enable = control[i].value;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
 				qp_range.qpi_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
 				qp_range.qpp_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
-				HAL_PARAM_VENC_SESSION_QP_RANGE;
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
 				qp_range.qpb_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
 				qp_range.qpi_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
 				qp_range.qpp_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
 				qp_range.qpb_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
 				bitrate.bit_rate = control[i].value;
-				bitrate.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_TARGET_BITRATE;
 				pdata = &bitrate;
@@ -2051,7 +2118,7 @@
 	/* To start with, both ports are 1 plane each */
 	inst->bufq[OUTPUT_PORT].num_planes = 1;
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
-	inst->operating_rate = 0;
+	inst->clk_data.operating_rate = 0;
 
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
 			sizeof(struct msm_vidc_format));
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index e071037..1cab039 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -28,6 +28,8 @@
 
 static int try_get_ctrl(struct msm_vidc_inst *inst,
 	struct v4l2_ctrl *ctrl);
+static int msm_vidc_get_count(struct msm_vidc_inst *inst,
+	struct v4l2_ctrl *ctrl);
 
 static int get_poll_flags(void *instance)
 {
@@ -274,6 +276,40 @@
 }
 EXPORT_SYMBOL(msm_vidc_g_ctrl);
 
+int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct v4l2_ext_control *ext_control;
+	struct v4l2_ctrl ctrl;
+	int i = 0, rc = 0;
+
+	if (!inst || !control)
+		return -EINVAL;
+
+	ext_control = control->controls;
+
+	for (i = 0; i < control->count; i++) {
+		switch (ext_control[i].id) {
+		case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+			ctrl.id = ext_control[i].id;
+			ctrl.val = ext_control[i].value;
+
+			msm_vidc_get_count(inst, &ctrl);
+			ext_control[i].value = ctrl.val;
+			break;
+		default:
+			dprintk(VIDC_ERR,
+				"This control %x is not supported yet\n",
+					ext_control[i].id);
+			rc = -EINVAL;
+			break;
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_vidc_g_ext_ctrl);
+
 int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
 {
 	struct msm_vidc_inst *inst = instance;
@@ -1187,6 +1223,8 @@
 	buf_count.buffer_type = type;
 	buf_count.buffer_count_actual = act_count;
 	buf_count.buffer_count_min_host = host_count;
+	dprintk(VIDC_DBG, "%s : Act count = %d Host count = %d\n",
+		__func__, act_count, host_count);
 	rc = call_hfi_op(hdev, session_set_property,
 		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
 	if (rc)
@@ -1433,7 +1471,9 @@
 			"Failed to move inst: %pK to start done state\n", inst);
 		goto fail_start;
 	}
-	msm_dcvs_init(inst);
+
+	msm_clock_data_reset(inst);
+
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 		rc = msm_comm_queue_output_buffers(inst);
@@ -1519,6 +1559,9 @@
 		dprintk(VIDC_ERR,
 			"Failed to move inst: %pK to state %d\n",
 				inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+
+	msm_clock_data_reset(inst);
+
 	return rc;
 }
 
@@ -1839,33 +1882,52 @@
 static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
-
-	/*
-	 * HACK: unlock the control prior to querying the hardware.  Otherwise
-	 * lower level code that attempts to do g_ctrl() will end up deadlocking
-	 * us.
-	 */
+	struct hal_buffer_requirements *bufreq = NULL;
+	enum hal_buffer buffer_type;
 
 	switch (ctrl->id) {
 
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
-	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
 		ctrl->val = inst->profile;
-	break;
+		break;
 
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
 		ctrl->val = inst->level;
-	break;
+		break;
 
 	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
 		ctrl->val = inst->entropy_mode;
-	break;
+		break;
 
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		if (inst->in_reconfig)
+			msm_comm_try_get_bufreqs(inst);
+
+		buffer_type = msm_comm_get_hal_output_buffer(inst);
+		bufreq = get_buff_req_buffer(inst,
+			buffer_type);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					buffer_type);
+			return -EINVAL;
+		}
+		ctrl->val = bufreq->buffer_count_min_host;
+		break;
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
-		rc = msm_vidc_get_count(inst, ctrl);
+		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					HAL_BUFFER_INPUT);
+			return -EINVAL;
+		}
+		ctrl->val = bufreq->buffer_count_min_host;
 		break;
 	default:
 		/*
@@ -1873,7 +1935,7 @@
 		 * modify ctrl->value
 		 */
 		break;
-}
+	}
 
 	return rc;
 }
@@ -1971,10 +2033,11 @@
 	inst->session_type = session_type;
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
-	inst->freq = 0;
-	inst->core_id = VIDC_CORE_ID_DEFAULT;
+	inst->clk_data.min_freq = 0;
+	inst->clk_data.curr_freq = 0;
+	inst->clk_data.bitrate = 0;
+	inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
-	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
 	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -2031,12 +2094,15 @@
 		goto fail_init;
 	}
 
+	msm_dcvs_try_enable(inst);
 	if (msm_vidc_check_for_inst_overload(core)) {
 		dprintk(VIDC_ERR,
 			"Instance count reached Max limit, rejecting session");
 		goto fail_init;
 	}
 
+	msm_comm_scale_clocks_and_bus(inst);
+
 	inst->debugfs_root =
 		msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index d43ae5a..b80aa08 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -23,17 +23,12 @@
 	struct msm_vidc_inst *inst = NULL;
 	struct vidc_bus_vote_data *vote_data = NULL;
 
-	if (!core) {
+	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
 
 	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-				__func__, hdev);
-		return -EINVAL;
-	}
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list)
@@ -65,12 +60,17 @@
 		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
 				inst->prop.height[OUTPUT_PORT]);
 
-		if (inst->operating_rate)
-			vote_data[i].fps = (inst->operating_rate >> 16) ?
-				inst->operating_rate >> 16 : 1;
+		if (inst->clk_data.operating_rate)
+			vote_data[i].fps =
+				(inst->clk_data.operating_rate >> 16) ?
+				inst->clk_data.operating_rate >> 16 : 1;
 		else
 			vote_data[i].fps = inst->prop.fps;
 
+		if (!msm_vidc_clock_scaling ||
+			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
+			vote_data[i].power_mode = VIDC_POWER_TURBO;
+
 		/*
 		 * TODO: support for OBP-DBP split mode hasn't been yet
 		 * implemented, once it is, this part of code needs to be
@@ -126,18 +126,19 @@
 	int buffers_outside_fw = 0;
 	struct msm_vidc_core *core;
 	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
+	struct clock_data *dcvs;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
 		return -EINVAL;
 	}
-	if (!inst->dcvs_mode) {
+
+	if (!inst->clk_data.dcvs_mode) {
 		dprintk(VIDC_DBG, "DCVS is not enabled\n");
 		return 0;
 	}
 
-	dcvs = &inst->dcvs;
+	dcvs = &inst->clk_data;
 
 	core = inst->core;
 	mutex_lock(&inst->lock);
@@ -210,7 +211,7 @@
 	}
 	mutex_unlock(&inst->freqs.lock);
 
-	inst->dcvs.buffer_counter++;
+	inst->clk_data.buffer_counter++;
 }
 
 
@@ -227,12 +228,13 @@
 
 	/* If current requirement is within DCVS limits, try DCVS. */
 
-	if (freq < inst->dcvs.load_high) {
+	if (freq < inst->clk_data.load_high) {
 		dprintk(VIDC_DBG, "Calling DCVS now\n");
 		// TODO calling DCVS here may reduce the residency. Re-visit.
 		msm_dcvs_scale_clocks(inst);
-		freq = inst->dcvs.load;
+		freq = inst->clk_data.load;
 	}
+	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
 
 	return freq;
 }
@@ -274,9 +276,10 @@
 	unsigned long freq = 0;
 	unsigned long vpp_cycles = 0, vsp_cycles = 0;
 	u32 vpp_cycles_per_mb;
-	u32 mbs_per_frame;
+	u32 mbs_per_second;
 
-	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+	mbs_per_second = msm_comm_get_inst_load(inst,
+		LOAD_CALC_NO_QUIRKS);
 
 	/*
 	 * Calculate vpp, vsp cycles separately for encoder and decoder.
@@ -286,17 +289,17 @@
 
 	if (inst->session_type == MSM_VIDC_ENCODER) {
 		vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
-			inst->entry->low_power_cycles :
-			inst->entry->vpp_cycles;
+			inst->clk_data.entry->low_power_cycles :
+			inst->clk_data.entry->vpp_cycles;
 
-		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 
 		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->bitrate * 10) / 7;
+		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
 	} else if (inst->session_type == MSM_VIDC_DECODER) {
-		vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
 
-		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 		/* 10 / 7 is overhead factor */
 		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
 
@@ -306,6 +309,8 @@
 		return freq;
 	}
 
 	freq = max(vpp_cycles, vsp_cycles);
+
+	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
 
 	return freq;
@@ -321,7 +326,7 @@
 
 	hdev = core->device;
 	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	if (!hdev || !allowed_clks_tbl) {
+	if (!allowed_clks_tbl) {
 		dprintk(VIDC_ERR,
 			"%s Invalid parameters\n", __func__);
 		return -EINVAL;
@@ -329,35 +334,104 @@
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		freq += temp->freq;
+		freq += temp->clk_data.curr_freq;
 	}
 	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
 		rate = allowed_clks_tbl[i].clock_rate;
 		if (rate >= freq)
 			break;
 	}
+	core->min_freq = freq;
+	core->curr_freq = rate;
 	mutex_unlock(&core->lock);
 
-	core->freq = rate;
-	dprintk(VIDC_PROF, "Voting for freq = %lu", freq);
+	dprintk(VIDC_PROF, "Min freq = %lu Current Freq = %lu\n",
+		core->min_freq, core->curr_freq);
 	rc = call_hfi_op(hdev, scale_clocks,
-			hdev->hfi_device_data, rate);
+			hdev->hfi_device_data, core->curr_freq);
 
 	return rc;
 }
 
-static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
 {
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	unsigned long freq = 0;
 
-	allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	freq = allowed_clks_tbl[0].clock_rate;
 	dprintk(VIDC_PROF, "Max rate = %lu", freq);
 
 	return freq;
 }
 
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
+{
+	struct v4l2_ctrl *ctrl = NULL;
+	struct msm_vidc_inst *temp;
+	struct msm_vidc_core *core;
+	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
+	unsigned long mbs_per_frame;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	max_freq = msm_vidc_max_freq(core);
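+	/* Add up the frequency already committed to other running sessions */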
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp == inst ||
+				temp->state < MSM_VIDC_START_DONE ||
+				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+			continue;
+
+		freq += temp->clk_data.min_freq;
+	}
+
+	freq_left = max_freq > freq ? max_freq - freq : 0;
+
+	list_for_each_entry(temp, &core->instances, list) {
+
+		mbs_per_frame = msm_dcvs_get_mbs_per_frame(temp);
+		cycles = temp->clk_data.entry->vpp_cycles;
+		if (temp->session_type == MSM_VIDC_ENCODER)
+			cycles = temp->flags & VIDC_LOW_POWER ?
+				temp->clk_data.entry->low_power_cycles :
+				cycles;
+
+		load = cycles * mbs_per_frame;
+
+		ops_left = load ? (freq_left / load) : 0;
+		/* Convert remaining operating rate to Q16 format */
+		ops_left = ops_left << 16;
+
+		ctrl = v4l2_ctrl_find(&temp->ctrl_handler,
+			V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE);
+		if (ctrl) {
+			dprintk(VIDC_DBG,
+				"%s: Before Range = %lld --> %lld\n",
+				ctrl->name, ctrl->minimum, ctrl->maximum);
+			dprintk(VIDC_DBG,
+				"%s: Before Def value = %lld Cur val = %d\n",
+				ctrl->name, ctrl->default_value, ctrl->val);
+			v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
+				ctrl->val + ops_left, ctrl->step,
+				ctrl->minimum);
+			dprintk(VIDC_DBG,
+				"%s: Updated Range = %lld --> %lld\n",
+				ctrl->name, ctrl->minimum, ctrl->maximum);
+			dprintk(VIDC_DBG,
+				"%s: Updated Def value = %lld Cur val = %d\n",
+				ctrl->name, ctrl->default_value, ctrl->val);
+		}
+	}
+	mutex_unlock(&core->lock);
+
+	return 0;
+}
+
 int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 {
 	struct vb2_buf_entry *temp, *next;
@@ -365,9 +439,10 @@
 	u32 filled_len = 0;
 	ion_phys_addr_t device_addr = 0;
 
-	if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
-		freq = msm_vidc_max_freq(inst);
-		goto decision_done;
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
 	}
 
 	mutex_lock(&inst->pendingq.lock);
@@ -381,7 +456,7 @@
 	mutex_unlock(&inst->pendingq.lock);
 
 	if (!filled_len || !device_addr) {
-		freq = inst->freq;
+		dprintk(VIDC_PROF, "No Change in frequency\n");
 		goto decision_done;
 	}
 
@@ -391,8 +466,15 @@
 
 	freq = msm_vidc_adjust_freq(inst);
 
+	inst->clk_data.min_freq = freq;
+
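+	/*
+	 * Run at the maximum rate until DCVS_FTB_WINDOW buffers have been
+	 * queued (or when clock scaling is disabled), so the load estimate
+	 * has enough history to be trusted.
+	 */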
+	if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW ||
+		!msm_vidc_clock_scaling)
+		inst->clk_data.curr_freq = msm_vidc_max_freq(inst->core);
+	else
+		inst->clk_data.curr_freq = freq;
+
 decision_done:
-	inst->freq = freq;
 	msm_vidc_set_clocks(inst->core);
 	return 0;
 }
@@ -422,29 +504,29 @@
 
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
 {
-	bool force_disable = false;
-
 	if (!inst) {
 		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
 		return -EINVAL;
 	}
 
-	force_disable = inst->session_type == MSM_VIDC_ENCODER ?
-		!msm_vidc_enc_dcvs_mode :
-		!msm_vidc_dec_dcvs_mode;
-
-	if (force_disable || inst->flags & VIDC_THUMBNAIL) {
-		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
-			inst);
-		inst->dcvs.extra_capture_buffer_count = 0;
-		inst->dcvs.extra_output_buffer_count = 0;
+	if (!msm_vidc_clock_scaling ||
+			inst->flags & VIDC_THUMBNAIL ||
+			inst->clk_data.low_latency_mode) {
+		dprintk(VIDC_PROF,
+			"This session doesn't need DCVS : %pK\n",
+				inst);
+		inst->clk_data.extra_capture_buffer_count = 0;
+		inst->clk_data.extra_output_buffer_count = 0;
+		inst->clk_data.dcvs_mode = false;
 		return false;
 	}
-	inst->dcvs_mode = true;
+	inst->clk_data.dcvs_mode = true;
 
 	// TODO : Update with proper number based on on-target tuning.
-	inst->dcvs.extra_capture_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
-	inst->dcvs.extra_output_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->clk_data.extra_capture_buffer_count =
+		DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->clk_data.extra_output_buffer_count =
+		DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
 	return true;
 }
 
@@ -482,6 +564,12 @@
 	struct clock_profile_entry *entry = NULL;
 	int fourcc;
 
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
+
 	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
 	fourcc = inst->session_type == MSM_VIDC_DECODER ?
 		inst->fmts[OUTPUT_PORT].fourcc :
@@ -498,7 +586,7 @@
 				inst->session_type);
 
 		if (matched) {
-			inst->entry = entry;
+			inst->clk_data.entry = entry;
 			break;
 		}
 	}
@@ -512,7 +600,7 @@
 	return rc;
 }
 
-static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
+static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
 {
 	dprintk(VIDC_DBG,
 		"DCVS: Load_Low %d, Load High %d\n",
@@ -524,31 +612,31 @@
 		dcvs->min_threshold, dcvs->max_threshold);
 }
 
-void msm_dcvs_init(struct msm_vidc_inst *inst)
+void msm_clock_data_reset(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core;
-	int i = 0;
+	int i = 0, rc = 0;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u64 total_freq = 0, rate = 0, load;
 	int cycles;
-	struct dcvs_stats *dcvs;
+	struct clock_data *dcvs;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
 	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
 		return;
 	}
 
 	core = inst->core;
-	dcvs = &inst->dcvs;
-	inst->dcvs = (struct dcvs_stats){0};
+	dcvs = &inst->clk_data;
 	load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-	cycles = inst->entry->vpp_cycles;
+	cycles = inst->clk_data.entry->vpp_cycles;
 	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	if (inst->session_type == MSM_VIDC_ENCODER) {
 		cycles = inst->flags & VIDC_LOW_POWER ?
-			inst->entry->low_power_cycles :
+			inst->clk_data.entry->low_power_cycles :
 			cycles;
 
 		dcvs->buffer_type = HAL_BUFFER_INPUT;
@@ -573,7 +661,17 @@
 	dcvs->load = dcvs->load_high = rate;
 	dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
 
+	inst->clk_data.buffer_counter = 0;
+
 	msm_dcvs_print_dcvs_stats(dcvs);
+
+	msm_vidc_update_operating_rate(inst);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
+	if (rc)
+		dprintk(VIDC_ERR, "%s Failed to scale Clocks and Bus\n",
+			__func__);
 }
 
 int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
@@ -585,23 +683,26 @@
 	}
 
 	return buffer_type == HAL_BUFFER_INPUT ?
-		inst->dcvs.extra_output_buffer_count :
-		inst->dcvs.extra_capture_buffer_count;
+		inst->clk_data.extra_output_buffer_count :
+		inst->clk_data.extra_capture_buffer_count;
 }
 
 int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
-	bool low_latency_mode;
 	struct hfi_device *hdev;
 	struct hal_video_work_mode pdata;
 	struct hal_enable latency;
 
-	hdev = inst->core->device;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR,
+			"%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
 
-	low_latency_mode = msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE);
-	if (low_latency_mode) {
+	hdev = inst->core->device;
+	if (inst->clk_data.low_latency_mode) {
 		pdata.video_work_mode = VIDC_WORK_MODE_1;
 		goto decision_done;
 	}
@@ -636,7 +737,7 @@
 
 decision_done:
 
-	inst->work_mode = pdata.video_work_mode;
+	inst->clk_data.work_mode = pdata.video_work_mode;
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
 			(void *)&pdata);
@@ -646,7 +747,8 @@
 
 	/* For WORK_MODE_1, set Low Latency mode by default to HW. */
 
-	if (inst->work_mode == VIDC_WORK_MODE_1) {
+	if (inst->session_type == MSM_VIDC_ENCODER &&
+			inst->clk_data.work_mode == VIDC_WORK_MODE_1) {
 		latency.enable = 1;
 		rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
@@ -675,8 +777,8 @@
 		return 0;
 	}
 	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
-	if (inst->core->resources.max_hq_mbs_per_frame > mbs_per_frame ||
-		inst->core->resources.max_hq_fps > inst->prop.fps) {
+	if (mbs_per_frame >= inst->core->resources.max_hq_mbs_per_frame ||
+		inst->prop.fps >= inst->core->resources.max_hq_fps) {
 		enable = true;
 	}
 
@@ -692,9 +794,12 @@
 			__func__, inst);
 		goto fail_power_mode_set;
 	}
-	inst->flags |= VIDC_LOW_POWER;
-	dprintk(VIDC_PROF, "Power Save Mode set for inst: %pK\n", inst);
+	inst->flags = enable ?
+		inst->flags | VIDC_LOW_POWER :
+		inst->flags & ~VIDC_LOW_POWER;
 
+	dprintk(VIDC_PROF,
+		"Power Save Mode for inst: %pK Enable = %d\n", inst, enable);
 fail_power_mode_set:
 	return rc;
 }
@@ -704,15 +809,10 @@
 {
 	struct msm_vidc_inst *inst = NULL;
 
-	if (!core) {
-		dprintk(VIDC_ERR, "Invalid args: %pK\n", core);
-		return -EINVAL;
-	}
-
 	dprintk(VIDC_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->core_id == core_id &&
+		if (inst->clk_data.core_id == core_id &&
 			inst->session_type == MSM_VIDC_ENCODER)
 			msm_vidc_power_save_mode_enable(inst, true);
 	}
@@ -731,19 +831,19 @@
 	list_for_each_entry(inst, &core->instances, list) {
 		u32 cycles, lp_cycles;
 
-		if (!inst->core_id && core_id)
+		if (!(inst->clk_data.core_id & core_id))
 			continue;
 		if (inst->session_type == MSM_VIDC_DECODER) {
-			cycles = lp_cycles = inst->entry->vpp_cycles;
+			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
 		} else if (inst->session_type == MSM_VIDC_ENCODER) {
 			lp_mode |= inst->flags & VIDC_LOW_POWER;
 			cycles = lp_mode ?
-				inst->entry->low_power_cycles :
-				inst->entry->vpp_cycles;
+				inst->clk_data.entry->low_power_cycles :
+				inst->clk_data.entry->vpp_cycles;
 		} else {
 			continue;
 		}
-		if (inst->core_id == 3)
+		if (inst->clk_data.core_id == 3)
 			cycles = cycles / 2;
 
 		current_inst_mbs_per_sec = msm_comm_get_inst_load(inst,
@@ -768,10 +868,17 @@
 		min_load = 0, min_lp_load = 0;
 	u32 min_core_id, min_lp_core_id;
 
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR,
+			"%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
+
 	core = inst->core;
 	hdev = core->device;
-	max_freq = msm_vidc_max_freq(inst);
-	inst->core_id = 0;
+	max_freq = msm_vidc_max_freq(inst->core);
+	inst->clk_data.core_id = 0;
 
 	core0_load = get_core_load(core, VIDC_CORE_ID_1, false);
 	core1_load = get_core_load(core, VIDC_CORE_ID_2, false);
@@ -786,11 +893,11 @@
 		VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
 
 	lp_cycles = inst->session_type == MSM_VIDC_ENCODER ?
-			inst->entry->low_power_cycles :
-			inst->entry->vpp_cycles;
+			inst->clk_data.entry->low_power_cycles :
+			inst->clk_data.entry->vpp_cycles;
 
 	current_inst_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
-		inst->entry->vpp_cycles;
+		inst->clk_data.entry->vpp_cycles;
 
 	current_inst_lp_load = msm_comm_get_inst_load(inst,
 		LOAD_CALC_NO_QUIRKS) * lp_cycles;
@@ -814,7 +921,7 @@
 	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
 		if (current_inst_load / 2 + core0_load <= max_freq &&
 			current_inst_load / 2 + core1_load <= max_freq) {
-			inst->core_id = VIDC_CORE_ID_3;
+			inst->clk_data.core_id = VIDC_CORE_ID_3;
 			msm_vidc_power_save_mode_enable(inst, false);
 			goto decision_done;
 		}
@@ -825,32 +932,32 @@
 				core0_lp_load <= max_freq &&
 			current_inst_lp_load / 2 +
 				core1_lp_load <= max_freq) {
-			inst->core_id = VIDC_CORE_ID_3;
+			inst->clk_data.core_id = VIDC_CORE_ID_3;
 			msm_vidc_power_save_mode_enable(inst, true);
 			goto decision_done;
 		}
 	}
 
 	if (current_inst_load + min_load < max_freq) {
-		inst->core_id = min_core_id;
+		inst->clk_data.core_id = min_core_id;
 		dprintk(VIDC_DBG,
 			"Selected normally : Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, false);
 	} else if (current_inst_lp_load + min_load < max_freq) {
 		/* Move current instance to LP and return */
-		inst->core_id = min_core_id;
+		inst->clk_data.core_id = min_core_id;
 		dprintk(VIDC_DBG,
 			"Selected by moving current to LP : Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, true);
 
 	} else if (current_inst_lp_load + min_lp_load < max_freq) {
 		/* Move all instances to LP mode and return */
-		inst->core_id = min_lp_core_id;
+		inst->clk_data.core_id = min_lp_core_id;
 		dprintk(VIDC_DBG,
 			"Moved all inst's to LP: Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_move_core_to_power_save_mode(core, min_lp_core_id);
 	} else {
 		rc = -EINVAL;
@@ -860,7 +967,7 @@
 	}
 
 decision_done:
-	core_info.video_core_enable_mask = inst->core_id;
+	core_info.video_core_enable_mask = inst->clk_data.core_id;
 
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session,
@@ -874,3 +981,4 @@
 	return rc;
 }
 
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index d01f074..fe4822b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -31,7 +31,8 @@
 /* Considering one safeguard buffer */
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
-void msm_dcvs_init(struct msm_vidc_inst *inst);
+void msm_clock_data_reset(struct msm_vidc_inst *inst);
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst);
 int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
 	enum hal_buffer buffer_type);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 853edf5..4f53850 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -321,15 +321,15 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 		switch (value) {
 		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0:
-			return HAL_VPX_PROFILE_VERSION_0;
+			return HAL_VPX_LEVEL_VERSION_0;
 		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1:
-			return HAL_VPX_PROFILE_VERSION_1;
+			return HAL_VPX_LEVEL_VERSION_1;
 		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2:
-			return HAL_VPX_PROFILE_VERSION_2;
+			return HAL_VPX_LEVEL_VERSION_2;
 		case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3:
-			return HAL_VPX_PROFILE_VERSION_3;
+			return HAL_VPX_LEVEL_VERSION_3;
 		case V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED:
-			return HAL_VPX_PROFILE_UNUSED;
+			return HAL_VPX_LEVEL_UNUSED;
 		default:
 			goto unknown_value;
 		}
@@ -584,9 +584,9 @@
 	capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
 		inst->prop.height[CAPTURE_PORT]);
 
-	if (inst->operating_rate) {
-		fps = (inst->operating_rate >> 16) ?
-			inst->operating_rate >> 16 : 1;
+	if (inst->clk_data.operating_rate) {
+		fps = (inst->clk_data.operating_rate >> 16) ?
+			inst->clk_data.operating_rate >> 16 : 1;
 		/*
 		 * Check if operating rate is less than fps.
 		 * If Yes, then use fps to scale clocks
@@ -2510,7 +2510,7 @@
 	}
 
 	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
-	freq = core->freq;
+	freq = core->curr_freq;
 
 	is_turbo = is_core_turbo(core, freq);
 	dprintk(VIDC_DBG,
@@ -4094,9 +4094,8 @@
 	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
 		"buffer type", "count", "mincount_host", "mincount_fw", "size");
 	for (i = 0; i < HAL_BUFFER_MAX; i++) {
-		struct hal_buffer_requirements req = hprop.buf_req.buffer[i];
+		struct hal_buffer_requirements req = inst->buff_req.buffer[i];
 
-		inst->buff_req.buffer[i] = req;
 		if (req.buffer_type != HAL_BUFFER_NONE) {
 			dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n",
 				get_buffer_name(req.buffer_type),
@@ -4738,9 +4737,9 @@
 		return 0;
 	}
 
-	// Finish FLUSH As Soon As Possible.
-	inst->dcvs.buffer_counter = 0;
-	msm_comm_scale_clocks_and_bus(inst);
+	/* Finish FLUSH As Soon As Possible. */
+
+	msm_clock_data_reset(inst);
 
 	msm_comm_flush_dynamic_buffers(inst);
 
@@ -5474,7 +5473,7 @@
 	}
 	core = inst->core;
 
-	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
+	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->curr_freq);
 	mutex_lock(&core->lock);
 	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
 	msm_comm_print_inst_info(inst);
@@ -5520,3 +5519,28 @@
 	mutex_unlock(&inst->lock);
 	return rc;
 }
+
+u32 get_frame_size_nv12(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
+}
+
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
+}
+
+u32 get_frame_size_rgba(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
+}
+
+u32 get_frame_size_nv21(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
+}
+
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
+}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 9c7eec5..098063d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -98,4 +98,9 @@
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
 int msm_comm_session_continue(void *instance);
+u32 get_frame_size_nv12(int plane, u32 height, u32 width);
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
+u32 get_frame_size_rgba(int plane, u32 height, u32 width);
+u32 get_frame_size_nv21(int plane, u32 height, u32 width);
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 15ee8a8..f62c132 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -26,12 +26,10 @@
 int msm_vidc_fw_low_power_mode = 1;
 int msm_vidc_hw_rsp_timeout = 2000;
 bool msm_vidc_fw_coverage = !true;
-bool msm_vidc_dec_dcvs_mode = true;
-bool msm_vidc_enc_dcvs_mode = true;
 bool msm_vidc_sys_idle_indicator = !true;
 int msm_vidc_firmware_unload_delay = 15000;
 bool msm_vidc_thermal_mitigation_disabled = !true;
-bool msm_vidc_bitrate_clock_scaling = true;
+bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
 
 #define MAX_DBG_BUF_SIZE 4096
@@ -174,8 +172,6 @@
 	__debugfs_create(x32, "fw_level", &msm_vidc_fw_debug) &&
 	__debugfs_create(u32, "fw_debug_mode", &msm_vidc_fw_debug_mode) &&
 	__debugfs_create(bool, "fw_coverage", &msm_vidc_fw_coverage) &&
-	__debugfs_create(bool, "dcvs_dec_mode", &msm_vidc_dec_dcvs_mode) &&
-	__debugfs_create(bool, "dcvs_enc_mode", &msm_vidc_enc_dcvs_mode) &&
 	__debugfs_create(u32, "fw_low_power_mode",
 			&msm_vidc_fw_low_power_mode) &&
 	__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
@@ -186,8 +182,8 @@
 			&msm_vidc_firmware_unload_delay) &&
 	__debugfs_create(bool, "disable_thermal_mitigation",
 			&msm_vidc_thermal_mitigation_disabled) &&
-	__debugfs_create(bool, "bitrate_clock_scaling",
-			&msm_vidc_bitrate_clock_scaling) &&
+	__debugfs_create(bool, "clock_scaling",
+			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
 			&msm_vidc_debug_timeout);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index cf5ce22..f5c8e5a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -59,12 +59,10 @@
 extern int msm_vidc_fw_low_power_mode;
 extern int msm_vidc_hw_rsp_timeout;
 extern bool msm_vidc_fw_coverage;
-extern bool msm_vidc_dec_dcvs_mode;
-extern bool msm_vidc_enc_dcvs_mode;
 extern bool msm_vidc_sys_idle_indicator;
 extern int msm_vidc_firmware_unload_delay;
 extern bool msm_vidc_thermal_mitigation_disabled;
-extern bool msm_vidc_bitrate_clock_scaling;
+extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
 
 #define VIDC_MSG_PRIO2STRING(__level) ({ \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 53bc068..37bccbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -52,7 +52,7 @@
 
 
 /* Maintains the number of FTB's between each FBD over a window */
-#define DCVS_FTB_WINDOW 32
+#define DCVS_FTB_WINDOW 16
 
 #define V4L2_EVENT_VIDC_BASE  10
 
@@ -205,7 +205,7 @@
 	int ebd;
 };
 
-struct dcvs_stats {
+struct clock_data {
 	int buffer_counter;
 	int load;
 	int load_low;
@@ -215,6 +215,15 @@
 	unsigned int extra_capture_buffer_count;
 	unsigned int extra_output_buffer_count;
 	enum hal_buffer buffer_type;
+	bool dcvs_mode;
+	unsigned long bitrate;
+	unsigned long min_freq;
+	unsigned long curr_freq;
+	u32 operating_rate;
+	struct clock_profile_entry *entry;
+	u32 core_id;
+	enum hal_work_mode work_mode;
+	bool low_latency_mode;
 };
 
 struct profile_data {
@@ -259,7 +268,8 @@
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	bool smmu_fault_handled;
-	unsigned long freq;
+	unsigned long min_freq;
+	unsigned long curr_freq;
 };
 
 struct msm_vidc_inst {
@@ -293,28 +303,21 @@
 	void *priv;
 	struct msm_vidc_debug debug;
 	struct buf_count count;
-	struct dcvs_stats dcvs;
+	struct clock_data clk_data;
 	enum msm_vidc_modes flags;
 	struct msm_vidc_capability capability;
 	u32 buffer_size_limit;
 	enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
 	struct v4l2_ctrl **ctrls;
-	bool dcvs_mode;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	unsigned long bitrate;
-	unsigned long freq;
 	u32 buffers_held_in_driver;
 	atomic_t in_flush;
 	u32 pic_struct;
 	u32 colour_space;
-	u32 operating_rate;
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
-	struct clock_profile_entry *entry;
-	u32 core_id;
-	enum hal_work_mode work_mode;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index f8e0a6a..537a1c6 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -187,7 +187,7 @@
 	HAL_PARAM_VDEC_SYNC_FRAME_DECODE,
 	HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
 	HAL_CONFIG_VENC_MAX_BITRATE,
-	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
+	HAL_PARAM_VENC_VUI_TIMING_INFO,
 	HAL_PARAM_VENC_GENERATE_AUDNAL,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
@@ -371,15 +371,18 @@
 };
 
 enum hal_vpx_profile {
-	HAL_VPX_PROFILE_SIMPLE    = 0x00000001,
-	HAL_VPX_PROFILE_ADVANCED  = 0x00000002,
-	HAL_VPX_PROFILE_VERSION_0 = 0x00000004,
-	HAL_VPX_PROFILE_VERSION_1 = 0x00000008,
-	HAL_VPX_PROFILE_VERSION_2 = 0x00000010,
-	HAL_VPX_PROFILE_VERSION_3 = 0x00000020,
+	HAL_VPX_PROFILE_MAIN    = 0x00000001,
 	HAL_VPX_PROFILE_UNUSED = 0x10000000,
 };
 
+enum hal_vpx_level {
+	HAL_VPX_LEVEL_VERSION_0 = 0x00000001,
+	HAL_VPX_LEVEL_VERSION_1 = 0x00000002,
+	HAL_VPX_LEVEL_VERSION_2 = 0x00000004,
+	HAL_VPX_LEVEL_VERSION_3 = 0x00000008,
+	HAL_VPX_LEVEL_UNUSED = 0x10000000,
+};
+
 struct hal_frame_rate {
 	enum hal_buffer buffer_type;
 	u32 frame_rate;
@@ -595,6 +598,7 @@
 	u32 qpp;
 	u32 qpb;
 	u32 layer_id;
+	u32 enable;
 };
 
 struct hal_quantization_range {
@@ -812,7 +816,7 @@
 };
 
 
-struct hal_h264_vui_timing_info {
+struct hal_vui_timing_info {
 	u32 enable;
 	u32 fixed_frame_rate;
 	u32 time_scale;
@@ -1035,7 +1039,7 @@
 	struct hal_codec_supported codec_supported;
 	struct hal_multi_view_select multi_view_select;
 	struct hal_timestamp_scale timestamp_scale;
-	struct hal_h264_vui_timing_info h264_vui_timing_info;
+	struct hal_vui_timing_info vui_timing_info;
 	struct hal_preserve_text_quality preserve_text_quality;
 	struct hal_buffer_info buffer_info;
 	struct hal_buffer_alloc_mode buffer_alloc_mode;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index d6c4bcb..2dd25f3 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -122,12 +122,12 @@
 #define HFI_MPEG2_LEVEL_H14					0x00000004
 #define HFI_MPEG2_LEVEL_HL					0x00000008
 
-#define HFI_VPX_PROFILE_SIMPLE				0x00000001
-#define HFI_VPX_PROFILE_ADVANCED			0x00000002
-#define HFI_VPX_PROFILE_VERSION_0			0x00000004
-#define HFI_VPX_PROFILE_VERSION_1			0x00000008
-#define HFI_VPX_PROFILE_VERSION_2			0x00000010
-#define HFI_VPX_PROFILE_VERSION_3			0x00000020
+#define HFI_VPX_PROFILE_MAIN			0x00000001
+
+#define HFI_VPX_LEVEL_VERSION_0			0x00000001
+#define HFI_VPX_LEVEL_VERSION_1			0x00000002
+#define HFI_VPX_LEVEL_VERSION_2			0x00000004
+#define HFI_VPX_LEVEL_VERSION_3			0x00000008
 
 #define  HFI_HEVC_PROFILE_MAIN			0x00000001
 #define  HFI_HEVC_PROFILE_MAIN10		0x00000002
@@ -283,7 +283,7 @@
 	 (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
 #define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
-#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO	\
+#define HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
@@ -536,7 +536,8 @@
 struct hfi_quantization {
 	u32 qp_packed;
 	u32 layer_id;
-	u32 reserved[4];
+	u32 enable;
+	u32 reserved[3];
 };
 
 struct hfi_quantization_range {
@@ -588,7 +589,7 @@
 	u32 matrix_coeffs;
 };
 
-struct hfi_h264_vui_timing_info {
+struct hfi_vui_timing_info {
 	u32 enable;
 	u32 fixed_frame_rate;
 	u32 time_scale;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd..f37d64c 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -45,27 +45,36 @@
 	compat_caddr_t		bitmap;
 };
 
-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int get_v4l2_window32(struct v4l2_window __user *kp,
+			struct v4l2_window32 __user *up)
 {
+	u32 clipcount = 0;
+
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
-		copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
-		get_user(kp->field, &up->field) ||
-		get_user(kp->chromakey, &up->chromakey) ||
-		get_user(kp->clipcount, &up->clipcount))
+		!access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_window)) ||
+		copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
+		copy_in_user(&kp->field, &up->field, sizeof(up->field)) ||
+		copy_in_user(&kp->chromakey, &up->chromakey,
+			sizeof(up->chromakey)) ||
+		copy_in_user(&kp->clipcount, &up->clipcount,
+			sizeof(up->clipcount)))
 			return -EFAULT;
-	if (kp->clipcount > 2048)
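+	/* Validate the count using the copy in kp to avoid a double fetch
+	 * from the 32-bit user struct.
+	 */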
+	if (get_user(clipcount, &kp->clipcount))
+		return -EFAULT;
+	if (clipcount > 2048)
 		return -EINVAL;
-	if (kp->clipcount) {
+	if (clipcount) {
 		struct v4l2_clip32 __user *uclips;
 		struct v4l2_clip __user *kclips;
-		int n = kp->clipcount;
+		int n = clipcount;
 		compat_caddr_t p;
 
 		if (get_user(p, &up->clips))
 			return -EFAULT;
 		uclips = compat_ptr(p);
 		kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
-		kp->clips = kclips;
+		if (put_user(kclips, &kp->clips))
+			return -EFAULT;
 		while (--n >= 0) {
 			if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
 				return -EFAULT;
@@ -74,89 +83,106 @@
 			uclips += 1;
 			kclips += 1;
 		}
-	} else
-		kp->clips = NULL;
-	return 0;
-}
-
-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
-{
-	if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
-		put_user(kp->field, &up->field) ||
-		put_user(kp->chromakey, &up->chromakey) ||
-		put_user(kp->clipcount, &up->clipcount))
+	} else {
+		if (put_user(NULL, &kp->clips))
 			return -EFAULT;
+	}
 	return 0;
 }
 
-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static int put_v4l2_window32(struct v4l2_window __user *kp,
+			struct v4l2_window32 __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+	if (copy_in_user(&up->w, &kp->w, sizeof(up->w)) ||
+		copy_in_user(&up->field, &kp->field, sizeof(up->field)) ||
+		copy_in_user(&up->chromakey, &kp->chromakey,
+			sizeof(up->chromakey)) ||
+		copy_in_user(&up->clipcount, &kp->clipcount,
+			sizeof(up->clipcount)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int get_v4l2_pix_format(struct v4l2_pix_format __user *kp,
+				struct v4l2_pix_format __user *up)
+{
+	if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format)))
+		return -EFAULT;
+	return 0;
+}
+
+static inline int get_v4l2_pix_format_mplane(
+				struct v4l2_pix_format_mplane __user *kp,
 				struct v4l2_pix_format_mplane __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static inline int put_v4l2_pix_format(struct v4l2_pix_format __user *kp,
+				struct v4l2_pix_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int put_v4l2_pix_format_mplane(
+				struct v4l2_pix_format_mplane __user *kp,
 				struct v4l2_pix_format_mplane __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int get_v4l2_vbi_format(struct v4l2_vbi_format __user *kp,
+				struct v4l2_vbi_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int put_v4l2_vbi_format(struct v4l2_vbi_format __user *kp,
+				struct v4l2_vbi_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int get_v4l2_sliced_vbi_format(
+				struct v4l2_sliced_vbi_format __user *kp,
+				struct v4l2_sliced_vbi_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int put_v4l2_sliced_vbi_format(
+				struct v4l2_sliced_vbi_format __user *kp,
+				struct v4l2_sliced_vbi_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int get_v4l2_sdr_format(struct v4l2_sdr_format __user *kp,
+				struct v4l2_sdr_format __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_sdr_format)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int put_v4l2_sdr_format(struct v4l2_sdr_format __user *kp,
+					struct v4l2_sdr_format __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_sdr_format)))
 		return -EFAULT;
 	return 0;
 }
@@ -191,12 +217,17 @@
 	__u32			reserved[8];
 };
 
-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __get_v4l2_format32(struct v4l2_format __user *kp,
+				struct v4l2_format32 __user *up)
 {
-	if (get_user(kp->type, &up->type))
+	u32 type;
+
+	if (copy_in_user(&kp->type, &up->type, sizeof(up->type)))
 		return -EFAULT;
 
-	switch (kp->type) {
+	if (get_user(type, &kp->type))
+		return -EFAULT;
+	switch (type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -223,27 +254,39 @@
 	}
 }
 
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_format32(struct v4l2_format __user *kp,
+				struct v4l2_format32 __user *up)
 {
-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+		!access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_format)))
 		return -EFAULT;
 	return __get_v4l2_format32(kp, up);
 }
 
-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
+				struct v4l2_create_buffers32 __user *up)
 {
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+		!access_ok(VERIFY_WRITE, kp,
+			sizeof(struct v4l2_create_buffers)) ||
+		copy_in_user(kp, up,
+			offsetof(struct v4l2_create_buffers32, format)))
 		return -EFAULT;
 	return __get_v4l2_format32(&kp->format, &up->format);
 }
 
-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __put_v4l2_format32(struct v4l2_format __user *kp,
+				struct v4l2_format32 __user *up)
 {
-	if (put_user(kp->type, &up->type))
+	u32 type;
+
+	if (copy_in_user(&up->type, &kp->type, sizeof(up->type)))
 		return -EFAULT;
 
-	switch (kp->type) {
+	if (get_user(type, &kp->type))
+		return -EFAULT;
+
+	switch (type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -270,18 +313,24 @@
 	}
 }
 
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_format32(struct v4l2_format __user *kp,
+				struct v4l2_format32 __user *up)
 {
-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
+	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
+		!access_ok(VERIFY_READ, kp, sizeof(struct v4l2_format)))
 		return -EFAULT;
 	return __put_v4l2_format32(kp, up);
 }
 
-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
+				struct v4l2_create_buffers32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
-	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+		!access_ok(VERIFY_READ, kp,
+			sizeof(struct v4l2_create_buffers)) ||
+		copy_in_user(up, kp,
+			offsetof(struct v4l2_create_buffers32, format)) ||
+		copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
 		return -EFAULT;
 	return __put_v4l2_format32(&kp->format, &up->format);
 }
@@ -295,24 +344,30 @@
 	__u32		     reserved[4];
 };
 
-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int get_v4l2_standard32(struct v4l2_standard __user *kp,
+			struct v4l2_standard32 __user *up)
 {
 	/* other fields are not set by the user, nor used by the driver */
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
-		get_user(kp->index, &up->index))
+		!access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_standard)) ||
+		copy_in_user(&kp->index, &up->index, sizeof(up->index)))
 		return -EFAULT;
 	return 0;
 }
 
-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int put_v4l2_standard32(struct v4l2_standard __user *kp,
+				struct v4l2_standard32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
-		put_user(kp->index, &up->index) ||
-		put_user(kp->id, &up->id) ||
-		copy_to_user(up->name, kp->name, 24) ||
-		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
-		put_user(kp->framelines, &up->framelines) ||
-		copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+		!access_ok(VERIFY_READ, kp, sizeof(struct v4l2_standard)) ||
+		copy_in_user(&up->index, &kp->index, sizeof(up->index)) ||
+		copy_in_user(&up->id, &kp->id, sizeof(up->id)) ||
+		copy_in_user(up->name, kp->name, 24) ||
+		copy_in_user(&up->frameperiod, &kp->frameperiod,
+			sizeof(up->frameperiod)) ||
+		copy_in_user(&up->framelines, &kp->framelines,
+			sizeof(up->framelines)) ||
+		copy_in_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
 			return -EFAULT;
 	return 0;
 }
@@ -360,6 +415,10 @@
 
 	if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
 		copy_in_user(&up->data_offset, &up32->data_offset,
+				sizeof(__u32)) ||
+		copy_in_user(up->reserved, up32->reserved,
+				sizeof(up->reserved)) ||
+		copy_in_user(&up->length, &up32->length,
 				sizeof(__u32)))
 		return -EFAULT;
 
@@ -386,7 +445,9 @@
 {
 	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
 		copy_in_user(&up32->data_offset, &up->data_offset,
-				sizeof(__u32)))
+				sizeof(__u32)) ||
+		copy_in_user(up32->reserved, up->reserved,
+				sizeof(up32->reserved)))
 		return -EFAULT;
 
 	/* For MMAP, driver might've set up the offset, so copy it back.
@@ -404,34 +465,48 @@
 	return 0;
 }
 
-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
+				struct v4l2_buffer32 __user *up)
 {
 	struct v4l2_plane32 __user *uplane32;
 	struct v4l2_plane __user *uplane;
 	compat_caddr_t p;
 	int num_planes;
+	struct timeval time;
+	u32 plane_count, memory, type;
 	int ret;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
-		get_user(kp->index, &up->index) ||
-		get_user(kp->type, &up->type) ||
-		get_user(kp->flags, &up->flags) ||
-		get_user(kp->memory, &up->memory) ||
-		get_user(kp->length, &up->length))
+		!access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_buffer)) ||
+		copy_in_user(&kp->index, &up->index, sizeof(up->index)) ||
+		copy_in_user(&kp->type, &up->type, sizeof(up->type)) ||
+		copy_in_user(&kp->flags, &up->flags, sizeof(up->flags)) ||
+		copy_in_user(&kp->memory, &up->memory, sizeof(up->memory)) ||
+		copy_in_user(&kp->length, &up->length, sizeof(up->length)))
 			return -EFAULT;
 
-	if (V4L2_TYPE_IS_OUTPUT(kp->type))
-		if (get_user(kp->bytesused, &up->bytesused) ||
-			get_user(kp->field, &up->field) ||
-			get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
-			get_user(kp->timestamp.tv_usec,
-					&up->timestamp.tv_usec))
+	if (get_user(type, &kp->type))
+		return -EFAULT;
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		if (copy_in_user(&kp->bytesused, &up->bytesused,
+				sizeof(up->bytesused)) ||
+			copy_in_user(&kp->field, &up->field,
+				sizeof(up->field)) ||
+			get_user(time.tv_sec, &up->timestamp.tv_sec) ||
+			get_user(time.tv_usec, &up->timestamp.tv_usec) ||
+			put_user(time.tv_sec, &kp->timestamp.tv_sec) ||
+			put_user(time.tv_usec, &kp->timestamp.tv_usec))
 			return -EFAULT;
 
-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		num_planes = kp->length;
+	if (get_user(memory, &kp->memory))
+		return -EFAULT;
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		if (get_user(plane_count, &kp->length))
+			return -EFAULT;
+		num_planes = plane_count;
 		if (num_planes == 0) {
-			kp->m.planes = NULL;
+			if (put_user(NULL, &kp->m.planes))
+				return -EFAULT;
 			/* num_planes == 0 is legal, e.g. when userspace doesn't
 			 * need planes array on DQBUF*/
 			return 0;
@@ -449,37 +524,43 @@
 		 * by passing a very big num_planes value */
 		uplane = compat_alloc_user_space(num_planes *
 						sizeof(struct v4l2_plane));
-		kp->m.planes = (__force struct v4l2_plane *)uplane;
+		if (put_user(uplane, &kp->m.planes))
+			return -EFAULT;
 
 		while (--num_planes >= 0) {
-			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+			ret = get_v4l2_plane32(uplane, uplane32, memory);
 			if (ret)
 				return ret;
 			++uplane;
 			++uplane32;
 		}
 	} else {
-		switch (kp->memory) {
+		switch (memory) {
 		case V4L2_MEMORY_MMAP:
-			if (get_user(kp->m.offset, &up->m.offset))
+			if (copy_in_user(&kp->m.offset, &up->m.offset,
+				sizeof(up->m.offset)))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
 			{
 			compat_long_t tmp;
+			unsigned long userptr;
 
 			if (get_user(tmp, &up->m.userptr))
 				return -EFAULT;
 
-			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+			userptr = (unsigned long)compat_ptr(tmp);
+			if (put_user(userptr, &kp->m.userptr))
+				return -EFAULT;
 			}
 			break;
 		case V4L2_MEMORY_OVERLAY:
-			if (get_user(kp->m.offset, &up->m.offset))
+			if (copy_in_user(&kp->m.offset, &up->m.offset,
+				sizeof(up->m.offset)))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_DMABUF:
-			if (get_user(kp->m.fd, &up->m.fd))
+			if (copy_in_user(&kp->m.fd, &up->m.fd,
+				sizeof(up->m.fd)))
 				return -EFAULT;
 			break;
 		}
@@ -488,65 +569,86 @@
 	return 0;
 }
 
-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
+				struct v4l2_buffer32 __user *up)
 {
 	struct v4l2_plane32 __user *uplane32;
 	struct v4l2_plane __user *uplane;
 	compat_caddr_t p;
 	int num_planes;
 	int ret;
+	struct timeval time;
+	u32 memory, type, length;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
-		put_user(kp->index, &up->index) ||
-		put_user(kp->type, &up->type) ||
-		put_user(kp->flags, &up->flags) ||
-		put_user(kp->memory, &up->memory))
-			return -EFAULT;
+		!access_ok(VERIFY_READ, kp, sizeof(struct v4l2_buffer)) ||
+		copy_in_user(&up->index, &kp->index, sizeof(up->index)) ||
+		copy_in_user(&up->type, &kp->type, sizeof(up->type)) ||
+		copy_in_user(&up->flags, &kp->flags, sizeof(up->flags)) ||
+		copy_in_user(&up->memory, &kp->memory, sizeof(up->memory)))
+		return -EFAULT;
 
-	if (put_user(kp->bytesused, &up->bytesused) ||
-		put_user(kp->field, &up->field) ||
-		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
-		put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
-		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
-		put_user(kp->sequence, &up->sequence) ||
-		put_user(kp->reserved2, &up->reserved2) ||
-		put_user(kp->reserved, &up->reserved) ||
-		put_user(kp->length, &up->length))
-			return -EFAULT;
+	if (copy_in_user(&up->bytesused, &kp->bytesused,
+			sizeof(up->bytesused)) ||
+		copy_in_user(&up->field, &kp->field, sizeof(up->field)) ||
+		get_user(time.tv_sec, &kp->timestamp.tv_sec) ||
+		get_user(time.tv_usec, &kp->timestamp.tv_usec) ||
+		put_user(time.tv_sec, &up->timestamp.tv_sec) ||
+		put_user(time.tv_usec, &up->timestamp.tv_usec) ||
+		copy_in_user(&up->timecode, &kp->timecode,
+			sizeof(struct v4l2_timecode)) ||
+		copy_in_user(&up->sequence, &kp->sequence,
+			sizeof(up->sequence)) ||
+		copy_in_user(&up->reserved2, &kp->reserved2,
+			sizeof(up->reserved2)) ||
+		copy_in_user(&up->reserved, &kp->reserved,
+			sizeof(up->reserved)) ||
+		copy_in_user(&up->length, &kp->length, sizeof(up->length)))
+		return -EFAULT;
 
-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		num_planes = kp->length;
+	if (get_user(type, &kp->type) ||
+		get_user(memory, &kp->memory) ||
+		get_user(length, &kp->length))
+		return -EFAULT;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+		num_planes = length;
 		if (num_planes == 0)
 			return 0;
 
-		uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+		if (get_user(uplane, &kp->m.planes))
+			return -EFAULT;
 		if (get_user(p, &up->m.planes))
 			return -EFAULT;
 		uplane32 = compat_ptr(p);
 
 		while (--num_planes >= 0) {
-			ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+			ret = put_v4l2_plane32(uplane, uplane32, memory);
 			if (ret)
 				return ret;
 			++uplane;
 			++uplane32;
 		}
 	} else {
-		switch (kp->memory) {
+		switch (memory) {
 		case V4L2_MEMORY_MMAP:
-			if (put_user(kp->m.offset, &up->m.offset))
+			if (copy_in_user(&up->m.offset, &kp->m.offset,
+				sizeof(up->m.offset)))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
-			if (put_user(kp->m.userptr, &up->m.userptr))
+			if (copy_in_user(&up->m.userptr, &kp->m.userptr,
+				sizeof(up->m.userptr)))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_OVERLAY:
-			if (put_user(kp->m.offset, &up->m.offset))
+			if (copy_in_user(&up->m.offset, &kp->m.offset,
+				sizeof(up->m.offset)))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_DMABUF:
-			if (put_user(kp->m.fd, &up->m.fd))
+			if (copy_in_user(&up->m.fd, &kp->m.fd,
+				sizeof(up->m.fd)))
 				return -EFAULT;
 			break;
 		}
@@ -571,29 +673,39 @@
 	} fmt;
 };
 
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+					struct v4l2_framebuffer32 __user *up)
 {
 	u32 tmp;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+		!access_ok(VERIFY_WRITE, kp,
+			sizeof(struct v4l2_framebuffer)) ||
 		get_user(tmp, &up->base) ||
-		get_user(kp->capability, &up->capability) ||
-		get_user(kp->flags, &up->flags) ||
-		copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+		put_user(compat_ptr(tmp), &kp->base) ||
+		copy_in_user(&kp->capability, &up->capability,
+			sizeof(up->capability)) ||
+		copy_in_user(&kp->flags, &up->flags, sizeof(up->flags)) ||
+		copy_in_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
 			return -EFAULT;
-	kp->base = (__force void *)compat_ptr(tmp);
+
 	return 0;
 }
 
-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+					struct v4l2_framebuffer32 __user *up)
 {
-	u32 tmp = (u32)((unsigned long)kp->base);
+	unsigned long base;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
-		put_user(tmp, &up->base) ||
-		put_user(kp->capability, &up->capability) ||
-		put_user(kp->flags, &up->flags) ||
-		copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
+		!access_ok(VERIFY_READ, kp,
+			sizeof(struct v4l2_framebuffer)) ||
+		copy_from_user(&base, &kp->base, sizeof(base)) ||
+		put_user((u32)base, &up->base) ||
+		copy_in_user(&up->capability, &kp->capability,
+			sizeof(up->capability)) ||
+		copy_in_user(&up->flags, &kp->flags, sizeof(up->flags)) ||
+		copy_in_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
 			return -EFAULT;
 	return 0;
 }
@@ -611,16 +723,18 @@
 
 /* The 64-bit v4l2_input struct has extra padding at the end of the struct.
    Otherwise it is identical to the 32-bit version. */
-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int get_v4l2_input32(struct v4l2_input __user *kp,
+					struct v4l2_input32 __user *up)
 {
-	if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+	if (copy_in_user(kp, up, sizeof(struct v4l2_input32)))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int put_v4l2_input32(struct v4l2_input __user *kp,
+					struct v4l2_input32 __user *up)
 {
-	if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+	if (copy_in_user(up, kp, sizeof(struct v4l2_input32)))
 		return -EFAULT;
 	return 0;
 }
@@ -661,23 +775,33 @@
 	}
 }
 
-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp,
+					struct v4l2_ext_controls32 __user *up)
 {
 	struct v4l2_ext_control32 __user *ucontrols;
 	struct v4l2_ext_control __user *kcontrols;
 	int n;
 	compat_caddr_t p;
+	u32 count;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
-		get_user(kp->which, &up->which) ||
-		get_user(kp->count, &up->count) ||
-		get_user(kp->error_idx, &up->error_idx) ||
-		copy_from_user(kp->reserved, up->reserved,
-			       sizeof(kp->reserved)))
+		!access_ok(VERIFY_WRITE, kp,
+			sizeof(struct v4l2_ext_controls)) ||
+		copy_in_user(&kp->which, &up->which,
+			sizeof(up->which)) ||
+		copy_in_user(&kp->count, &up->count, sizeof(up->count)) ||
+		copy_in_user(&kp->error_idx, &up->error_idx,
+			sizeof(up->error_idx)) ||
+		copy_in_user(kp->reserved, up->reserved,
+			       sizeof(up->reserved)))
 			return -EFAULT;
-	n = kp->count;
+
+	if (get_user(count, &kp->count))
+		return -EFAULT;
+	n = count;
 	if (n == 0) {
-		kp->controls = NULL;
+		if (put_user(NULL, &kp->controls))
+			return -EFAULT;
 		return 0;
 	}
 	if (get_user(p, &up->controls))
@@ -687,7 +811,9 @@
 			n * sizeof(struct v4l2_ext_control32)))
 		return -EFAULT;
 	kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
-	kp->controls = (__force struct v4l2_ext_control *)kcontrols;
+	if (put_user(kcontrols, &kp->controls))
+		return -EFAULT;
+
 	while (--n >= 0) {
 		u32 id;
 
@@ -710,23 +836,33 @@
 	return 0;
 }
 
-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp,
+				struct v4l2_ext_controls32 __user *up)
 {
 	struct v4l2_ext_control32 __user *ucontrols;
-	struct v4l2_ext_control __user *kcontrols =
-		(__force struct v4l2_ext_control __user *)kp->controls;
-	int n = kp->count;
+	struct v4l2_ext_control __user *kcontrols;
+	int n;
+	u32 count;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
-		put_user(kp->which, &up->which) ||
-		put_user(kp->count, &up->count) ||
-		put_user(kp->error_idx, &up->error_idx) ||
-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+		!access_ok(VERIFY_READ, kp,
+			sizeof(struct v4l2_ext_controls)) ||
+		copy_in_user(&up->which, &kp->which,
+			sizeof(up->which)) ||
+		copy_in_user(&up->count, &kp->count,
+			sizeof(up->count)) ||
+		copy_in_user(&up->error_idx, &kp->error_idx,
+			sizeof(up->error_idx)) ||
+		copy_in_user(up->reserved, kp->reserved,
+			sizeof(up->reserved)) ||
+		get_user(count, &kp->count) ||
+		get_user(kcontrols, &kp->controls))
 			return -EFAULT;
-	if (!kp->count)
+	if (!count)
 		return 0;
 
+	n = count;
 	if (get_user(p, &up->controls))
 		return -EFAULT;
 	ucontrols = compat_ptr(p);
@@ -766,16 +902,22 @@
 	__u32				reserved[8];
 };
 
-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+static int put_v4l2_event32(struct v4l2_event __user *kp,
+			struct v4l2_event32 __user *up)
 {
+	struct timespec ts;
+
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
-		put_user(kp->type, &up->type) ||
-		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
-		put_user(kp->pending, &up->pending) ||
-		put_user(kp->sequence, &up->sequence) ||
-		compat_put_timespec(&kp->timestamp, &up->timestamp) ||
-		put_user(kp->id, &up->id) ||
-		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+		!access_ok(VERIFY_READ, kp, sizeof(struct v4l2_event)) ||
+		copy_in_user(&up->type, &kp->type, sizeof(up->type)) ||
+		copy_in_user(&up->u, &kp->u, sizeof(up->u)) ||
+		copy_in_user(&up->pending, &kp->pending,
+			sizeof(up->pending)) ||
+		copy_in_user(&up->sequence, &kp->sequence,
+			sizeof(up->sequence)) ||
+		copy_from_user(&ts, &kp->timestamp, sizeof(ts)) ||
+		compat_put_timespec(&ts, &up->timestamp) ||
+		copy_in_user(&up->id, &kp->id, sizeof(up->id)) ||
+		copy_in_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
 			return -EFAULT;
 	return 0;
 }
@@ -788,31 +930,39 @@
 	compat_caddr_t edid;
 };
 
-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid __user *kp,
+						struct v4l2_edid32 __user *up)
 {
 	u32 tmp;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
-		get_user(kp->pad, &up->pad) ||
-		get_user(kp->start_block, &up->start_block) ||
-		get_user(kp->blocks, &up->blocks) ||
+		!access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_edid)) ||
+		copy_in_user(&kp->pad, &up->pad, sizeof(up->pad)) ||
+		copy_in_user(&kp->start_block, &up->start_block,
+			sizeof(up->start_block)) ||
+		copy_in_user(&kp->blocks, &up->blocks, sizeof(up->blocks)) ||
 		get_user(tmp, &up->edid) ||
-		copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+		put_user(compat_ptr(tmp), &kp->edid) ||
+		copy_in_user(kp->reserved, up->reserved,
+			sizeof(kp->reserved)))
 			return -EFAULT;
-	kp->edid = (__force u8 *)compat_ptr(tmp);
 	return 0;
 }
 
-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid __user *kp,
+			struct v4l2_edid32 __user *up)
 {
-	u32 tmp = (u32)((unsigned long)kp->edid);
+	unsigned long ptr;
 
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
-		put_user(kp->pad, &up->pad) ||
-		put_user(kp->start_block, &up->start_block) ||
-		put_user(kp->blocks, &up->blocks) ||
-		put_user(tmp, &up->edid) ||
-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+		!access_ok(VERIFY_READ, kp, sizeof(struct v4l2_edid)) ||
+		copy_in_user(&up->pad, &kp->pad, sizeof(up->pad)) ||
+		copy_in_user(&up->start_block, &kp->start_block,
+			sizeof(up->start_block)) ||
+		copy_in_user(&up->blocks, &kp->blocks, sizeof(up->blocks)) ||
+		copy_from_user(&ptr, &kp->edid, sizeof(ptr)) ||
+		put_user((u32)ptr, &up->edid) ||
+		copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
 			return -EFAULT;
 	return 0;
 }
@@ -859,11 +1009,16 @@
 		struct v4l2_edid v2edid;
 		unsigned long vx;
 		int vi;
-	} karg;
+	} *karg;
 	void __user *up = compat_ptr(arg);
 	int compatible_arg = 1;
 	long err = 0;
 
+	karg = compat_alloc_user_space(sizeof(*karg));
+	if (karg == NULL) {
+		return -EFAULT;
+	}
+
 	/* First, convert the command. */
 	switch (cmd) {
 	case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
@@ -899,7 +1054,8 @@
 	case VIDIOC_STREAMOFF:
 	case VIDIOC_S_INPUT:
 	case VIDIOC_S_OUTPUT:
-		err = get_user(karg.vi, (s32 __user *)up);
+		err = copy_in_user(&karg->vi, (s32 __user *)up,
+			sizeof(karg->vi));
 		compatible_arg = 0;
 		break;
 
@@ -910,19 +1066,19 @@
 
 	case VIDIOC_G_EDID:
 	case VIDIOC_S_EDID:
-		err = get_v4l2_edid32(&karg.v2edid, up);
+		err = get_v4l2_edid32(&karg->v2edid, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_FMT:
 	case VIDIOC_S_FMT:
 	case VIDIOC_TRY_FMT:
-		err = get_v4l2_format32(&karg.v2f, up);
+		err = get_v4l2_format32(&karg->v2f, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_CREATE_BUFS:
-		err = get_v4l2_create32(&karg.v2crt, up);
+		err = get_v4l2_create32(&karg->v2crt, up);
 		compatible_arg = 0;
 		break;
 
@@ -930,12 +1086,12 @@
 	case VIDIOC_QUERYBUF:
 	case VIDIOC_QBUF:
 	case VIDIOC_DQBUF:
-		err = get_v4l2_buffer32(&karg.v2b, up);
+		err = get_v4l2_buffer32(&karg->v2b, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_S_FBUF:
-		err = get_v4l2_framebuffer32(&karg.v2fb, up);
+		err = get_v4l2_framebuffer32(&karg->v2fb, up);
 		compatible_arg = 0;
 		break;
 
@@ -944,19 +1100,19 @@
 		break;
 
 	case VIDIOC_ENUMSTD:
-		err = get_v4l2_standard32(&karg.v2s, up);
+		err = get_v4l2_standard32(&karg->v2s, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_ENUMINPUT:
-		err = get_v4l2_input32(&karg.v2i, up);
+		err = get_v4l2_input32(&karg->v2i, up);
 		compatible_arg = 0;
 		break;
 
 	case VIDIOC_G_EXT_CTRLS:
 	case VIDIOC_S_EXT_CTRLS:
 	case VIDIOC_TRY_EXT_CTRLS:
-		err = get_v4l2_ext_controls32(&karg.v2ecs, up);
+		err = get_v4l2_ext_controls32(&karg->v2ecs, up);
 		compatible_arg = 0;
 		break;
 	case VIDIOC_DQEVENT:
@@ -969,11 +1125,7 @@
 	if (compatible_arg)
 		err = native_ioctl(file, cmd, (unsigned long)up);
 	else {
-		mm_segment_t old_fs = get_fs();
-
-		set_fs(KERNEL_DS);
-		err = native_ioctl(file, cmd, (unsigned long)&karg);
-		set_fs(old_fs);
+		err = native_ioctl(file, cmd, (unsigned long)karg);
 	}
 
 	/* Special case: even after an error we need to put the
@@ -983,7 +1135,7 @@
 	case VIDIOC_G_EXT_CTRLS:
 	case VIDIOC_S_EXT_CTRLS:
 	case VIDIOC_TRY_EXT_CTRLS:
-		if (put_v4l2_ext_controls32(&karg.v2ecs, up))
+		if (put_v4l2_ext_controls32(&karg->v2ecs, up))
 			err = -EFAULT;
 		break;
 	}
@@ -995,44 +1147,44 @@
 	case VIDIOC_S_OUTPUT:
 	case VIDIOC_G_INPUT:
 	case VIDIOC_G_OUTPUT:
-		err = put_user(((s32)karg.vi), (s32 __user *)up);
+		err = copy_in_user(up, &karg->vi, sizeof(s32));
 		break;
 
 	case VIDIOC_G_FBUF:
-		err = put_v4l2_framebuffer32(&karg.v2fb, up);
+		err = put_v4l2_framebuffer32(&karg->v2fb, up);
 		break;
 
 	case VIDIOC_DQEVENT:
-		err = put_v4l2_event32(&karg.v2ev, up);
+		err = put_v4l2_event32(&karg->v2ev, up);
 		break;
 
 	case VIDIOC_G_EDID:
 	case VIDIOC_S_EDID:
-		err = put_v4l2_edid32(&karg.v2edid, up);
+		err = put_v4l2_edid32(&karg->v2edid, up);
 		break;
 
 	case VIDIOC_G_FMT:
 	case VIDIOC_S_FMT:
 	case VIDIOC_TRY_FMT:
-		err = put_v4l2_format32(&karg.v2f, up);
+		err = put_v4l2_format32(&karg->v2f, up);
 		break;
 
 	case VIDIOC_CREATE_BUFS:
-		err = put_v4l2_create32(&karg.v2crt, up);
+		err = put_v4l2_create32(&karg->v2crt, up);
 		break;
 
 	case VIDIOC_QUERYBUF:
 	case VIDIOC_QBUF:
 	case VIDIOC_DQBUF:
-		err = put_v4l2_buffer32(&karg.v2b, up);
+		err = put_v4l2_buffer32(&karg->v2b, up);
 		break;
 
 	case VIDIOC_ENUMSTD:
-		err = put_v4l2_standard32(&karg.v2s, up);
+		err = put_v4l2_standard32(&karg->v2s, up);
 		break;
 
 	case VIDIOC_ENUMINPUT:
-		err = put_v4l2_input32(&karg.v2i, up);
+		err = put_v4l2_input32(&karg->v2i, up);
 		break;
 	}
 	return err;
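
A note on the conversion pattern above: the compat handlers now keep every structure in user-addressable memory obtained from compat_alloc_user_space() and move fields with get_user()/put_user()/copy_in_user(), instead of filling a kernel-stack struct under set_fs(KERNEL_DS). A minimal userspace sketch of the 32-to-64-bit layout widening those helpers perform; the struct and field names below are illustrative only, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit ABI view: the pointer member is a 32-bit handle (compat_caddr_t) */
    struct ctrls32 {
        uint32_t count;
        uint32_t controls;          /* user pointer truncated to 32 bits */
    };

    /* native 64-bit view of the same object */
    struct ctrls {
        uint32_t count;
        void *controls;             /* full-width pointer */
    };

    /* widen the compat layout into the native one */
    static void get_ctrls32(struct ctrls *dst, const struct ctrls32 *src)
    {
        dst->count = src->count;
        dst->controls = (void *)(uintptr_t)src->controls;
    }

    int main(void)
    {
        struct ctrls32 c32 = { .count = 2, .controls = 0x1000 };
        struct ctrls c;

        get_ctrls32(&c, &c32);
        printf("count=%u controls=%p\n", c.count, c.controls);
        return 0;
    }
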
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index adc2147..6739fb0 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -337,6 +337,8 @@
 		"4.2",
 		"5",
 		"5.1",
+		"5.2",
+		"Unknown",
 		NULL,
 	};
 	static const char * const h264_loop_filter[] = {
@@ -363,6 +365,7 @@
 		"Scalable High Intra",
 		"Stereo High",
 		"Multiview High",
+		"Constrained High",
 		NULL,
 	};
 	static const char * const vui_sar_idc[] = {
diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig
index 9c73960..e8a7960 100644
--- a/drivers/misc/qcom/Kconfig
+++ b/drivers/misc/qcom/Kconfig
@@ -1,6 +1,5 @@
 config MSM_QDSP6V2_CODECS
 	bool "Audio QDSP6V2 APR support"
-	depends on MSM_SMD
 	select SND_SOC_QDSP6V2
 	help
 	  Enable Audio codecs with APR IPC protocol support between
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2c02d2d..877c4d1 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -5869,7 +5869,8 @@
 		 * It will return false if it is GPCE based crypto instance or
 		 * ICE is setup properly
 		 */
-		if (qseecom_enable_ice_setup(create_key_req.usage))
+		ret = qseecom_enable_ice_setup(create_key_req.usage);
+		if (ret)
 			goto free_buf;
 
 		do {
@@ -5998,7 +5999,8 @@
 		 * It will return false if it is GPCE based crypto instance or
 		 * ICE is setup properly
 		 */
-		if (qseecom_enable_ice_setup(wipe_key_req.usage))
+		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+		if (ret)
 			goto free_buf;
 
 		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 3bc7d4e..127a052 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -96,9 +96,11 @@
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long bkt;
+	uid_t uid;
 
 	rt_mutex_lock(&uid_lock);
 
@@ -109,14 +111,13 @@
 
 	read_lock(&tasklist_lock);
 	do_each_thread(temp, task) {
-		uid_entry = find_or_register_uid(from_kuid_munged(
-			current_user_ns(), task_uid(task)));
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry) {
 			read_unlock(&tasklist_lock);
 			rt_mutex_unlock(&uid_lock);
 			pr_err("%s: failed to find the uid_entry for uid %d\n",
-				__func__, from_kuid_munged(current_user_ns(),
-				task_uid(task)));
+				__func__, uid);
 			return -ENOMEM;
 		}
 		task_cputime_adjusted(task, &utime, &stime);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a0dabd4..7ab24c5 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -740,13 +740,18 @@
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
 	struct gs_can *dev = netdev_priv(netdev);
-	struct gs_identify_mode imode;
+	struct gs_identify_mode *imode;
 	int rc;
 
+	imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+	if (!imode)
+		return -ENOMEM;
+
 	if (do_identify)
-		imode.mode = GS_CAN_IDENTIFY_ON;
+		imode->mode = GS_CAN_IDENTIFY_ON;
 	else
-		imode.mode = GS_CAN_IDENTIFY_OFF;
+		imode->mode = GS_CAN_IDENTIFY_OFF;
 
 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -756,10 +761,12 @@
 			     USB_RECIP_INTERFACE,
 			     dev->channel,
 			     0,
-			     &imode,
-			     sizeof(imode),
+			     imode,
+			     sizeof(*imode),
 			     100);
 
+	kfree(imode);
+
 	return (rc > 0) ? 0 : rc;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 81d8e3b..21ce0b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,7 +82,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
 
 #define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT                       32
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 90e81ae..e034dbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -563,6 +563,7 @@
 	int idx = 0;
 	int err = 0;
 
+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 5595724..b5d5519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@
 					 struct netdev_notifier_changeupper_info *info)
 {
 	struct net_device *upper = info->upper_dev, *ndev_tmp;
-	struct netdev_lag_upper_info *lag_upper_info;
+	struct netdev_lag_upper_info *lag_upper_info = NULL;
 	bool is_bonded;
 	int bond_status = 0;
 	int num_slaves = 0;
@@ -303,7 +303,8 @@
 	if (!netif_is_lag_master(upper))
 		return 0;
 
-	lag_upper_info = info->upper_info;
+	if (info->linking)
+		lag_upper_info = info->upper_info;
 
 	/* The event may still be of interest if the slave does not belong to
 	 * us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7a196a0..d776db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}
 
 	err = mlx5_core_enable_hca(dev, 0);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1a92de7..a2d218b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1059,12 +1059,70 @@
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1073,27 +1131,18 @@
 	kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1341,43 +1390,6 @@
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1620,7 +1632,7 @@
 			   intr_status, mdp->cur_tx, mdp->dirty_tx,
 			   (u32)ndev->state, edtrr);
 		/* dirty buffer free */
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 
 		/* SH7712 BUG */
 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1679,7 +1691,7 @@
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2307,7 +2319,7 @@
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
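
In the sh_eth change above, sh_eth_txfree() is replaced by sh_eth_tx_free(ndev, sent_only): the interrupt and xmit paths pass true and stop at the first descriptor still owned by the hardware, while sh_eth_ring_free() passes false to release everything at teardown. A standalone sketch of that control flow, with a plain array standing in for the descriptor ring (all names here are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        bool pending;   /* still owned by the "hardware" */
        void *buf;      /* attached buffer, NULL once freed */
    };

    /* Free buffers in order; with sent_only, stop at the first pending entry. */
    static int ring_free(struct entry *ring, int n, bool sent_only)
    {
        int freed = 0;

        for (int i = 0; i < n; i++) {
            if (sent_only && ring[i].pending)
                break;
            if (ring[i].buf) {
                free(ring[i].buf);
                ring[i].buf = NULL;
                freed++;
            }
        }
        return freed;
    }

    int main(void)
    {
        struct entry ring[4] = {
            { false, malloc(8) }, { false, malloc(8) },
            { true,  malloc(8) }, { true,  malloc(8) },
        };

        printf("completed only: %d\n", ring_free(ring, 4, true));  /* 2 */
        printf("teardown: %d\n", ring_free(ring, 4, false));       /* 2 */
        return 0;
    }
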
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d2e61e0..f7c6a40 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2709,7 +2709,7 @@
 }
 
 #define MACSEC_FEATURES \
-	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+	(NETIF_F_SG | NETIF_F_HIGHDMA)
 static struct lock_class_key macsec_netdev_addr_lock_key;
 
 static int macsec_dev_init(struct net_device *dev)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 26d6f0b..dc8ccac 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1140,6 +1140,7 @@
 static void macvlan_port_destroy(struct net_device *dev)
 {
 	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+	struct sk_buff *skb;
 
 	dev->priv_flags &= ~IFF_MACVLAN_PORT;
 	netdev_rx_handler_unregister(dev);
@@ -1148,7 +1149,15 @@
 	 * but we need to cancel it and purge left skbs if any.
 	 */
 	cancel_work_sync(&port->bc_work);
-	__skb_queue_purge(&port->bc_queue);
+
+	while ((skb = __skb_dequeue(&port->bc_queue))) {
+		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+		if (src)
+			dev_put(src->dev);
+
+		kfree_skb(skb);
+	}
 
 	kfree_rcu(port, rcu);
 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7a240fc..4865221 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1438,8 +1438,6 @@
 		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 		skb_queue_tail(&dp83640->rx_queue, skb);
 		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-	} else {
-		netif_rx_ni(skb);
 	}
 
 	return true;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 201ffa5..a9be26f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -552,16 +552,18 @@
 EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  *   them), and then calls the driver's config_aneg function.
  *   If the PHYCONTROL Layer is operating, we change the state to
  *   reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+	bool trigger = 0;
 	int err;
 
 	mutex_lock(&phydev->lock);
@@ -586,10 +588,40 @@
 		}
 	}
 
+	/* Re-schedule a PHY state machine to check PHY status because
+	 * negotiation may already be done and aneg interrupt may not be
+	 * generated.
+	 */
+	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+		err = phy_aneg_done(phydev);
+		if (err > 0) {
+			trigger = true;
+			err = 0;
+		}
+	}
+
 out_unlock:
 	mutex_unlock(&phydev->lock);
+
+	if (trigger)
+		phy_trigger_machine(phydev, sync);
+
 	return err;
 }
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ *   them), and then calls the driver's config_aneg function.
+ *   If the PHYCONTROL Layer is operating, we change the state to
+ *   reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+	return phy_start_aneg_priv(phydev, true);
+}
 EXPORT_SYMBOL(phy_start_aneg);
 
 /**
@@ -617,7 +649,7 @@
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
 	if (sync)
 		cancel_delayed_work_sync(&phydev->state_queue);
@@ -639,7 +671,7 @@
 	cancel_delayed_work_sync(&phydev->state_queue);
 
 	mutex_lock(&phydev->lock);
-	if (phydev->state > PHY_UP)
+	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
 }
@@ -1100,7 +1132,7 @@
 	mutex_unlock(&phydev->lock);
 
 	if (needs_aneg)
-		err = phy_start_aneg(phydev);
+		err = phy_start_aneg_priv(phydev, false);
 	else if (do_suspend)
 		phy_suspend(phydev);
 
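
The phy change above turns phy_start_aneg() into a wrapper around phy_start_aneg_priv(), which notes under phydev->lock whether auto-negotiation already finished and only calls phy_trigger_machine() after the lock is dropped. A reduced pthread analogue of that decide-under-the-lock, act-after-unlock shape (the mutex, flag and worker names are made up):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool aneg_done;          /* state that must be read under the lock */

    static void trigger_machine(void)
    {
        /* stand-in for phy_trigger_machine(); must not run with the lock held */
        printf("re-scheduling state machine\n");
    }

    static int start(void)
    {
        bool trigger = false;

        pthread_mutex_lock(&lock);
        /* ... configure hardware ... */
        if (aneg_done)              /* negotiation finished before irq was enabled? */
            trigger = true;
        pthread_mutex_unlock(&lock);

        if (trigger)
            trigger_machine();
        return 0;
    }

    int main(void)
    {
        aneg_done = true;
        return start();
    }
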
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 0184c96..3a45cf8 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -206,7 +206,9 @@
 	while ((skb = skb_dequeue(&delivery_queue))) {
 		struct sock *sk_udp = skb->sk;
 		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
-		struct msghdr msg = { 0 };
+		struct msghdr msg = {
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
 
 		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
 			      skb->len);
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
index d9e0603..cdb4fa1 100644
--- a/drivers/net/ppp/pppopns.c
+++ b/drivers/net/ppp/pppopns.c
@@ -189,7 +189,9 @@
 	while ((skb = skb_dequeue(&delivery_queue))) {
 		struct sock *sk_raw = skb->sk;
 		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
-		struct msghdr msg = { 0 };
+		struct msghdr msg = {
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
 
 		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
 			      skb->len);
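
Both pppolac and pppopns now send queued frames with MSG_NOSIGNAL | MSG_DONTWAIT, so a dead or congested tunnel socket neither raises SIGPIPE nor blocks the delivery worker. In-kernel sends pick those flags up from msghdr.msg_flags; the userspace equivalent below passes them as the sendmsg() flags argument instead, with a socketpair standing in for the tunnel socket:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    int main(void)
    {
        int fds[2];
        char payload[] = "ppp frame";
        struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) < 0) {
            perror("socketpair");
            return 1;
        }

        /* never raise SIGPIPE, never block the sender */
        n = sendmsg(fds[0], &msg, MSG_NOSIGNAL | MSG_DONTWAIT);
        if (n < 0)
            perror("sendmsg");
        else
            printf("sent %zd bytes\n", n);
        return 0;
    }
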
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index a2afb8e..80ef486 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1124,7 +1124,7 @@
 		goto nla_put_failure;
 
 	/* rule only needs to appear once */
-	nlh->nlmsg_flags &= NLM_F_EXCL;
+	nlh->nlmsg_flags |= NLM_F_EXCL;
 
 	frh = nlmsg_data(nlh);
 	memset(frh, 0, sizeof(*frh));
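
The vrf fix above is a one-character operator change: nlh->nlmsg_flags &= NLM_F_EXCL cleared every other flag and only kept NLM_F_EXCL if it happened to be set already, whereas |= sets the intended bit and leaves the rest alone. A two-line demonstration with generic flag values rather than the netlink constants:

    #include <stdio.h>

    #define FLAG_A    0x1
    #define FLAG_B    0x2
    #define FLAG_EXCL 0x4

    int main(void)
    {
        unsigned int wrong = FLAG_A | FLAG_B;
        unsigned int right = FLAG_A | FLAG_B;

        wrong &= FLAG_EXCL;     /* 0x0: A and B wiped, EXCL never set */
        right |= FLAG_EXCL;     /* 0x7: A and B kept, EXCL added */

        printf("&= gives 0x%x, |= gives 0x%x\n", wrong, right);
        return 0;
    }
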
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6c68fd9..4e111cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -474,22 +474,23 @@
 	}
 	mutex_unlock(&wil->p2p_wdev_mutex);
 
-	/* social scan on P2P_DEVICE is handled as p2p search */
-	if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
-	    wil_p2p_is_social_scan(request)) {
+	if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
 		if (!wil->p2p.p2p_dev_started) {
 			wil_err(wil, "P2P search requested on stopped P2P device\n");
 			rc = -EIO;
 			goto out;
 		}
-		wil->scan_request = request;
-		wil->radio_wdev = wdev;
-		rc = wil_p2p_search(wil, request);
-		if (rc) {
-			wil->radio_wdev = wil_to_wdev(wil);
-			wil->scan_request = NULL;
+		/* social scan on P2P_DEVICE is handled as p2p search */
+		if (wil_p2p_is_social_scan(request)) {
+			wil->scan_request = request;
+			wil->radio_wdev = wdev;
+			rc = wil_p2p_search(wil, request);
+			if (rc) {
+				wil->radio_wdev = wil_to_wdev(wil);
+				wil->scan_request = NULL;
+			}
+			goto out;
 		}
-		goto out;
 	}
 
 	(void)wil_p2p_stop_discovery(wil);
@@ -499,9 +500,9 @@
 
 	for (i = 0; i < request->n_ssids; i++) {
 		wil_dbg_misc(wil, "SSID[%d]", i);
-		print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-				     request->ssids[i].ssid,
-				     request->ssids[i].ssid_len);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ssids[i].ssid,
+				  request->ssids[i].ssid_len, true);
 	}
 
 	if (request->n_ssids)
@@ -538,8 +539,8 @@
 	}
 
 	if (request->ie_len)
-		print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
-				     request->ie, request->ie_len);
+		wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ie, request->ie_len, true);
 	else
 		wil_dbg_misc(wil, "Scan has no IE's\n");
 
@@ -763,6 +764,8 @@
 	rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
 	if (rc == 0) {
 		netif_carrier_on(ndev);
+		wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+		wil->bss = bss;
 		/* Connect can take lots of time */
 		mod_timer(&wil->connect_timer,
 			  jiffies + msecs_to_jiffies(2000));
@@ -791,6 +794,7 @@
 		return 0;
 	}
 
+	wil->locally_generated_disc = true;
 	rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
 		      WMI_DISCONNECT_EVENTID, NULL, 0,
 		      WIL6210_DISCONNECT_TO_MS);
@@ -844,7 +848,8 @@
 	 */
 
 	wil_dbg_misc(wil, "mgmt_tx\n");
-	print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+	wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+			  len, true);
 
 	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
 	if (!cmd) {
@@ -1177,18 +1182,18 @@
 
 static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
 {
-	print_hex_dump_bytes("head     ", DUMP_PREFIX_OFFSET,
-			     b->head, b->head_len);
-	print_hex_dump_bytes("tail     ", DUMP_PREFIX_OFFSET,
-			     b->tail, b->tail_len);
-	print_hex_dump_bytes("BCON IE  ", DUMP_PREFIX_OFFSET,
-			     b->beacon_ies, b->beacon_ies_len);
-	print_hex_dump_bytes("PROBE    ", DUMP_PREFIX_OFFSET,
-			     b->probe_resp, b->probe_resp_len);
-	print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
-			     b->proberesp_ies, b->proberesp_ies_len);
-	print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
-			     b->assocresp_ies, b->assocresp_ies_len);
+	wil_hex_dump_misc("head     ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->head, b->head_len, true);
+	wil_hex_dump_misc("tail     ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->tail, b->tail_len, true);
+	wil_hex_dump_misc("BCON IE  ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->beacon_ies, b->beacon_ies_len, true);
+	wil_hex_dump_misc("PROBE    ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->probe_resp, b->probe_resp_len, true);
+	wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->proberesp_ies, b->proberesp_ies_len, true);
+	wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
+			  b->assocresp_ies, b->assocresp_ies_len, true);
 }
 
 /* internal functions for device reset and starting AP */
@@ -1282,6 +1287,7 @@
 	wil->pbss = pbss;
 
 	netif_carrier_on(ndev);
+	wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
 
 	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
 	if (rc)
@@ -1297,6 +1303,7 @@
 	wmi_pcp_stop(wil);
 err_pcp_start:
 	netif_carrier_off(ndev);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 out:
 	mutex_unlock(&wil->mutex);
 	return rc;
@@ -1382,8 +1389,8 @@
 	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
 		     info->dtim_period);
 	wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
-	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-			     info->ssid, info->ssid_len);
+	wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+			  info->ssid, info->ssid_len, true);
 	wil_print_bcon_data(bcon);
 	wil_print_crypto(wil, crypto);
 
@@ -1403,6 +1410,7 @@
 	wil_dbg_misc(wil, "stop_ap\n");
 
 	netif_carrier_off(ndev);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
 	mutex_lock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3e8cdf1..5648ebb 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -524,9 +524,8 @@
 	if (!buf)
 		return -ENOMEM;
 
-	wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
-				    (const volatile void __iomem *)
-				    wil_blob->blob.data + pos, count);
+	wil_memcpy_fromio_32(buf, (const void __iomem *)
+			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
 	kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2c48419..36959a3 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -30,8 +30,8 @@
 module_param(debug_fw, bool, 0444);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
-static bool oob_mode;
-module_param(oob_mode, bool, 0444);
+static u8 oob_mode;
+module_param(oob_mode, byte, 0444);
 MODULE_PARM_DESC(oob_mode,
 		 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
 
@@ -135,14 +135,6 @@
 		*d++ = __raw_readl(s++);
 }
 
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
-				 const volatile void __iomem *src, size_t count)
-{
-	wil_halp_vote(wil);
-	wil_memcpy_fromio_32(dst, src, count);
-	wil_halp_unvote(wil);
-}
-
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count)
 {
@@ -153,15 +145,6 @@
 		__raw_writel(*s++, d++);
 }
 
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
-			       volatile void __iomem *dst,
-			       const void *src, size_t count)
-{
-	wil_halp_vote(wil);
-	wil_memcpy_toio_32(dst, src, count);
-	wil_halp_unvote(wil);
-}
-
 static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
 			       u16 reason_code, bool from_event)
 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -274,15 +257,20 @@
 		wil_bcast_fini(wil);
 		wil_update_net_queues_bh(wil, NULL, true);
 		netif_carrier_off(ndev);
+		wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 
 		if (test_bit(wil_status_fwconnected, wil->status)) {
 			clear_bit(wil_status_fwconnected, wil->status);
 			cfg80211_disconnected(ndev, reason_code,
-					      NULL, 0, false, GFP_KERNEL);
+					      NULL, 0,
+					      wil->locally_generated_disc,
+					      GFP_KERNEL);
+			wil->locally_generated_disc = false;
 		} else if (test_bit(wil_status_fwconnecting, wil->status)) {
 			cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
 						WLAN_STATUS_UNSPECIFIED_FAILURE,
 						GFP_KERNEL);
+			wil->bss = NULL;
 		}
 		clear_bit(wil_status_fwconnecting, wil->status);
 		break;
@@ -304,10 +292,34 @@
 {
 	struct wil6210_priv *wil = container_of(work,
 			struct wil6210_priv, disconnect_worker);
+	struct net_device *ndev = wil_to_ndev(wil);
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_disconnect_event evt;
+	} __packed reply;
 
-	mutex_lock(&wil->mutex);
-	_wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
-	mutex_unlock(&wil->mutex);
+	if (test_bit(wil_status_fwconnected, wil->status))
+		/* connect succeeded after all */
+		return;
+
+	if (!test_bit(wil_status_fwconnecting, wil->status))
+		/* already disconnected */
+		return;
+
+	rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+		      WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
+		      WIL6210_DISCONNECT_TO_MS);
+	if (rc) {
+		wil_err(wil, "disconnect error %d\n", rc);
+		return;
+	}
+
+	wil_update_net_queues_bh(wil, NULL, true);
+	netif_carrier_off(ndev);
+	cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
+				WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
+	clear_bit(wil_status_fwconnecting, wil->status);
 }
 
 static void wil_connect_timer_fn(ulong x)
@@ -557,6 +569,12 @@
 	return -EAGAIN;
 }
 
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
+{
+	if (wil->platform_ops.bus_request)
+		wil->platform_ops.bus_request(wil->platform_handle, kbps);
+}
+
 /**
  * wil6210_disconnect - disconnect one connection
  * @wil: driver context
@@ -610,13 +628,25 @@
 	wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
-static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
 {
-	wil_info(wil, "enable=%d\n", enable);
-	if (enable)
+	wil_info(wil, "oob_mode to %d\n", mode);
+	switch (mode) {
+	case 0:
+		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
+		      BIT_USER_OOB_R2_MODE);
+		break;
+	case 1:
+		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
 		wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
-	else
+		break;
+	case 2:
 		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+		wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+		break;
+	default:
+		wil_err(wil, "invalid oob_mode: %d\n", mode);
+	}
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -1073,9 +1103,7 @@
 	napi_enable(&wil->napi_tx);
 	set_bit(wil_status_napi_en, wil->status);
 
-	if (wil->platform_ops.bus_request)
-		wil->platform_ops.bus_request(wil->platform_handle,
-					      WIL_MAX_BUS_REQUEST_KBPS);
+	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 
 	return 0;
 }
@@ -1099,8 +1127,7 @@
 
 	set_bit(wil_status_resetting, wil->status);
 
-	if (wil->platform_ops.bus_request)
-		wil->platform_ops.bus_request(wil->platform_handle, 0);
+	wil6210_bus_request(wil, 0);
 
 	wil_disable_irq(wil);
 	if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
@@ -1163,6 +1190,7 @@
 		    wil->halp.ref_cnt);
 
 	if (++wil->halp.ref_cnt == 1) {
+		reinit_completion(&wil->halp.comp);
 		wil6210_set_halp(wil);
 		rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
 		if (!rc) {
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 1a65d07..d80e7f4 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -90,7 +90,7 @@
 	done = budget - quota;
 
 	if (done < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, done);
 		wil6210_unmask_irq_rx(wil);
 		wil_dbg_txrx(wil, "NAPI RX complete\n");
 	}
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 7260bef..2ae4fe8 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -71,6 +71,11 @@
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 0;
+	}
+
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
 		rc = wil_down(wil);
@@ -86,10 +91,14 @@
 
 	if (wil->platform_ops.suspend) {
 		rc = wil->platform_ops.suspend(wil->platform_handle);
-		if (rc)
+		if (rc) {
 			wil_enable_irq(wil);
+			goto out;
+		}
 	}
 
+	set_bit(wil_status_suspended, wil->status);
+
 out:
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
@@ -117,10 +126,13 @@
 
 	/* if netif up, bring hardware up
 	 * During open(), IFF_UP set after actual device method
-	 * invocation. This prevent recursive call to wil_up()
+	 * invocation. This prevents recursive calls to wil_up().
+	 * wil_status_suspended will be cleared in wil_reset
 	 */
 	if (ndev->flags & IFF_UP)
 		rc = wil_up(wil);
+	else
+		clear_bit(wil_status_suspended, wil->status);
 
 out:
 	wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4bccef3..734449d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -41,6 +41,7 @@
 #define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
 #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
+#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
 /**
@@ -140,6 +141,7 @@
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
+	#define BIT_USER_OOB_R2_MODE		BIT(30)
 #define RGF_USER_USAGE_8		(0x880020)
 	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
 	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
@@ -413,6 +415,7 @@
 	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
+	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_last /* keep last */
 };
 
@@ -616,6 +619,8 @@
 	u16 channel; /* relevant in AP mode */
 	int sinfo_gen;
 	u32 ap_isolate; /* no intra-BSS communication */
+	struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
+	int locally_generated_disc; /* relevant in STA mode */
 	/* interrupt moderation */
 	u32 tx_max_burst_duration;
 	u32 tx_interframe_timeout;
@@ -771,6 +776,12 @@
 			 print_hex_dump_debug("DBG[ WMI]" prefix_str,\
 					prefix_type, rowsize,	\
 					groupsize, buf, len, ascii)
+
+#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize,	\
+			  groupsize, buf, len, ascii)		\
+			  print_hex_dump_debug("DBG[MISC]" prefix_str,\
+					prefix_type, rowsize,	\
+					groupsize, buf, len, ascii)
 #else /* defined(CONFIG_DYNAMIC_DEBUG) */
 static inline
 void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
@@ -783,18 +794,18 @@
 		      int groupsize, const void *buf, size_t len, bool ascii)
 {
 }
+
+static inline
+void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize,
+		       int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
 #endif /* defined(CONFIG_DYNAMIC_DEBUG) */
 
 void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
 			  size_t count);
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 			size_t count);
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
-				 const volatile void __iomem *src,
-				 size_t count);
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
-			       volatile void __iomem *dst,
-			       const void *src, size_t count);
 
 void *wil_if_alloc(struct device *dev);
 void wil_if_free(struct wil6210_priv *wil);
@@ -910,7 +921,7 @@
 		 u8 type);
 int wmi_abort_scan(struct wil6210_priv *wil);
 void wil_abort_scan(struct wil6210_priv *wil, bool sync);
-
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
 			u16 reason_code, bool from_event);
 void wil_probe_client_flush(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0ede7f7..31d6ab9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -566,6 +566,7 @@
 	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
 		if (rc) {
 			netif_carrier_off(ndev);
+			wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 			wil_err(wil, "cfg80211_connect_result with failure\n");
 			cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
 						NULL, 0,
@@ -573,12 +574,16 @@
 						GFP_KERNEL);
 			goto out;
 		} else {
-			cfg80211_connect_result(ndev, evt->bssid,
-						assoc_req_ie, assoc_req_ielen,
-						assoc_resp_ie, assoc_resp_ielen,
-						WLAN_STATUS_SUCCESS,
-						GFP_KERNEL);
+			struct wiphy *wiphy = wil_to_wiphy(wil);
+
+			cfg80211_ref_bss(wiphy, wil->bss);
+			cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
+					     assoc_req_ie, assoc_req_ielen,
+					     assoc_resp_ie, assoc_resp_ielen,
+					     WLAN_STATUS_SUCCESS, GFP_KERNEL,
+					     NL80211_TIMEOUT_UNSPECIFIED);
 		}
+		wil->bss = NULL;
 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
 		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
 		if (rc) {
@@ -1524,6 +1529,7 @@
 
 	wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
 
+	wil->locally_generated_disc = true;
 	if (del_sta) {
 		ether_addr_copy(del_sta_cmd.dst_mac, mac);
 		rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
@@ -1765,14 +1771,19 @@
 
 void wmi_event_flush(struct wil6210_priv *wil)
 {
+	ulong flags;
 	struct pending_wmi_event *evt, *t;
 
 	wil_dbg_wmi(wil, "event_flush\n");
 
+	spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
 	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
 		list_del(&evt->list);
 		kfree(evt);
 	}
+
+	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
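
The wmi_event_flush() hunk closes a race: the pending-event list is appended to elsewhere under wil->wmi_ev_lock, so flushing it should hold the same lock around the whole walk. A compact userspace analogue using a pthread mutex over a singly linked list (structure and function names are invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct event {
        struct event *next;
        int id;
    };

    static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct event *pending;       /* head of the pending-event list */

    static void event_push(int id)
    {
        struct event *e = malloc(sizeof(*e));

        e->id = id;
        pthread_mutex_lock(&ev_lock);
        e->next = pending;
        pending = e;
        pthread_mutex_unlock(&ev_lock);
    }

    /* drop every queued event; the whole walk happens under the lock */
    static void event_flush(void)
    {
        pthread_mutex_lock(&ev_lock);
        while (pending) {
            struct event *e = pending;

            pending = e->next;
            free(e);
        }
        pthread_mutex_unlock(&ev_lock);
    }

    int main(void)
    {
        event_push(1);
        event_push(2);
        event_flush();
        printf("flushed\n");
        return 0;
    }
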
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 6555eb7..6ae8964 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -120,6 +120,20 @@
 
 	  If unsure, say N.
 
+config PCI_MSM
+	bool "MSM PCIe Controller driver"
+	depends on ARCH_QCOM && PCI
+	select PCI_DOMAINS
+	select PCI_DOMAINS_GENERIC
+	select PCI_MSI
+	help
+	  Enables PCIe functionality by configuring the PCIe core on the
+	  MSM chipset and by enabling the ARM PCI framework extension.
+	  The PCIe core is essential for communication between the host
+	  and an endpoint.
+
+	  If unsure, say N.
+
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
 	select NLS
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 6523cb0..a0fa943 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -27,11 +27,11 @@
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
-#include <linux/regulator/rpm-smd-regulator.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/of_gpio.h>
-#include <linux/clk/msm-clk.h>
+#include <linux/clk/qcom.h>
 #include <linux/reset.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
@@ -48,170 +48,27 @@
 #include <linux/ipc_logging.h>
 #include <linux/msm_pcie.h>
 
-#ifdef CONFIG_ARCH_MDMCALIFORNIUM
 #define PCIE_VENDOR_ID_RCP		0x17cb
-#define PCIE_DEVICE_ID_RCP		0x0302
-
-#define PCIE20_L1SUB_CONTROL1		0x158
-#define PCIE20_PARF_DBI_BASE_ADDR	0x350
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x358
-
-#define TX_BASE 0x200
-#define RX_BASE 0x400
-#define PCS_BASE 0x800
-#define PCS_MISC_BASE 0x600
-
-#elif defined(CONFIG_ARCH_MSM8998)
-#define PCIE_VENDOR_ID_RCP		0x17cb
-#define PCIE_DEVICE_ID_RCP		0x0105
+#define PCIE_DEVICE_ID_RCP		0x0106
 
 #define PCIE20_L1SUB_CONTROL1		0x1E4
 #define PCIE20_PARF_DBI_BASE_ADDR       0x350
 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
 
-#define TX_BASE 0
-#define RX_BASE 0
 #define PCS_BASE 0x800
-#define PCS_MISC_BASE 0
 
-#else
-#define PCIE_VENDOR_ID_RCP		0x17cb
-#define PCIE_DEVICE_ID_RCP		0x0104
-
-#define PCIE20_L1SUB_CONTROL1		0x158
-#define PCIE20_PARF_DBI_BASE_ADDR	0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x16C
-
-#define TX_BASE 0x1000
-#define RX_BASE 0x1200
-#define PCS_BASE 0x1400
-#define PCS_MISC_BASE 0
-#endif
-
-#define TX(n, m) (TX_BASE + n * m * 0x1000)
-#define RX(n, m) (RX_BASE + n * m * 0x1000)
 #define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
-#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
-
-#define QSERDES_COM_BG_TIMER			0x00C
-#define QSERDES_COM_SSC_EN_CENTER		0x010
-#define QSERDES_COM_SSC_ADJ_PER1		0x014
-#define QSERDES_COM_SSC_ADJ_PER2		0x018
-#define QSERDES_COM_SSC_PER1			0x01C
-#define QSERDES_COM_SSC_PER2			0x020
-#define QSERDES_COM_SSC_STEP_SIZE1		0x024
-#define QSERDES_COM_SSC_STEP_SIZE2		0x028
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x034
-#define QSERDES_COM_CLK_ENABLE1			0x038
-#define QSERDES_COM_SYS_CLK_CTRL		0x03C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x040
-#define QSERDES_COM_PLL_IVCO			0x048
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x04C
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x050
-#define QSERDES_COM_LOCK_CMP3_MODE0		0x054
-#define QSERDES_COM_BG_TRIM			0x070
-#define QSERDES_COM_CLK_EP_DIV			0x074
-#define QSERDES_COM_CP_CTRL_MODE0		0x078
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x084
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x090
-#define QSERDES_COM_SYSCLK_EN_SEL		0x0AC
-#define QSERDES_COM_RESETSM_CNTRL		0x0B4
-#define QSERDES_COM_RESTRIM_CTRL		0x0BC
-#define QSERDES_COM_RESCODE_DIV_NUM		0x0C4
-#define QSERDES_COM_LOCK_CMP_EN			0x0C8
-#define QSERDES_COM_DEC_START_MODE0		0x0D0
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x0DC
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x0E0
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x0E4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x108
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x10C
-#define QSERDES_COM_VCO_TUNE_CTRL		0x124
-#define QSERDES_COM_VCO_TUNE_MAP		0x128
-#define QSERDES_COM_VCO_TUNE1_MODE0		0x12C
-#define QSERDES_COM_VCO_TUNE2_MODE0		0x130
-#define QSERDES_COM_VCO_TUNE_TIMER1		0x144
-#define QSERDES_COM_VCO_TUNE_TIMER2		0x148
-#define QSERDES_COM_BG_CTRL			0x170
-#define QSERDES_COM_CLK_SELECT			0x174
-#define QSERDES_COM_HSCLK_SEL			0x178
-#define QSERDES_COM_CORECLK_DIV			0x184
-#define QSERDES_COM_CORE_CLK_EN			0x18C
-#define QSERDES_COM_C_READY_STATUS		0x190
-#define QSERDES_COM_CMN_CONFIG			0x194
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x19C
-#define QSERDES_COM_DEBUG_BUS0			0x1A0
-#define QSERDES_COM_DEBUG_BUS1			0x1A4
-#define QSERDES_COM_DEBUG_BUS2			0x1A8
-#define QSERDES_COM_DEBUG_BUS3			0x1AC
-#define QSERDES_COM_DEBUG_BUS_SEL		0x1B0
-
-#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m)		(TX(n, m) + 0x4C)
-#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m)		(TX(n, m) + 0x64)
-#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
-#define QSERDES_TX_N_LANE_MODE(n, m)			(TX(n, m) + 0x94)
-#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m)		(TX(n, m) + 0xAC)
-
-#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m)		(RX(n, m) + 0x010)
-#define QSERDES_RX_N_UCDR_SO_GAIN(n, m)			(RX(n, m) + 0x01C)
-#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m)	(RX(n, m) + 0x0D8)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m)	(RX(n, m) + 0x0DC)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m)	(RX(n, m) + 0x0E0)
-#define QSERDES_RX_N_SIGDET_ENABLES(n, m)		(RX(n, m) + 0x110)
-#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m)	(RX(n, m) + 0x11C)
-#define QSERDES_RX_N_SIGDET_LVL(n, m)			(RX(n, m) + 0x118)
-#define QSERDES_RX_N_RX_BAND(n, m)			(RX(n, m) + 0x120)
-
-#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x00)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x04)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x08)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x0C)
-#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x14)
-#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x18)
-#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x1C)
-#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x20)
 
 #define PCIE_N_SW_RESET(n, m)			(PCS_PORT(n, m) + 0x00)
 #define PCIE_N_POWER_DOWN_CONTROL(n, m)		(PCS_PORT(n, m) + 0x04)
-#define PCIE_N_START_CONTROL(n, m)		(PCS_PORT(n, m) + 0x08)
-#define PCIE_N_TXDEEMPH_M6DB_V0(n, m)		(PCS_PORT(n, m) + 0x24)
-#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m)		(PCS_PORT(n, m) + 0x28)
-#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m)	(PCS_PORT(n, m) + 0x54)
-#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m)		(PCS_PORT(n, m) + 0x58)
-#define PCIE_N_POWER_STATE_CONFIG1(n, m)	(PCS_PORT(n, m) + 0x60)
-#define PCIE_N_POWER_STATE_CONFIG4(n, m)	(PCS_PORT(n, m) + 0x6C)
-#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA0)
-#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA4)
-#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m)	(PCS_PORT(n, m) + 0xA8)
-#define PCIE_N_TEST_CONTROL4(n, m)		(PCS_PORT(n, m) + 0x11C)
-#define PCIE_N_TEST_CONTROL5(n, m)		(PCS_PORT(n, m) + 0x120)
-#define PCIE_N_TEST_CONTROL6(n, m)		(PCS_PORT(n, m) + 0x124)
-#define PCIE_N_TEST_CONTROL7(n, m)		(PCS_PORT(n, m) + 0x128)
 #define PCIE_N_PCS_STATUS(n, m)			(PCS_PORT(n, m) + 0x174)
-#define PCIE_N_DEBUG_BUS_0_STATUS(n, m)		(PCS_PORT(n, m) + 0x198)
-#define PCIE_N_DEBUG_BUS_1_STATUS(n, m)		(PCS_PORT(n, m) + 0x19C)
-#define PCIE_N_DEBUG_BUS_2_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A0)
-#define PCIE_N_DEBUG_BUS_3_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A4)
-#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1A8)
-#define PCIE_N_OSC_DTCT_ACTIONS(n, m)			(PCS_PORT(n, m) + 0x1AC)
-#define PCIE_N_SIGDET_CNTRL(n, m)			(PCS_PORT(n, m) + 0x1B0)
-#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m)	(PCS_PORT(n, m) + 0x1DC)
-#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1E0)
 
 #define PCIE_COM_SW_RESET		0x400
 #define PCIE_COM_POWER_DOWN_CONTROL	0x404
-#define PCIE_COM_START_CONTROL		0x408
-#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX	0x438
-#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX	0x43C
-#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX	0x440
-#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX	0x444
 #define PCIE_COM_PCS_READY_STATUS	0x448
-#define PCIE_COM_DEBUG_BUS_0_STATUS	0x45C
-#define PCIE_COM_DEBUG_BUS_1_STATUS	0x460
-#define PCIE_COM_DEBUG_BUS_2_STATUS	0x464
-#define PCIE_COM_DEBUG_BUS_3_STATUS	0x468
 
 #define PCIE20_PARF_SYS_CTRL	     0x00
+#define PCIE20_PARF_PM_CTRL		0x20
 #define PCIE20_PARF_PM_STTS		0x24
 #define PCIE20_PARF_PCS_DEEMPH	   0x34
 #define PCIE20_PARF_PCS_SWING	    0x38
@@ -228,6 +85,7 @@
 #define PCIE20_PARF_SID_OFFSET		0x234
 #define PCIE20_PARF_BDF_TRANSLATE_CFG	0x24C
 #define PCIE20_PARF_BDF_TRANSLATE_N	0x250
+#define PCIE20_PARF_DEVICE_TYPE		0x1000
 
 #define PCIE20_ELBI_VERSION		0x00
 #define PCIE20_ELBI_SYS_CTRL	     0x04
@@ -300,7 +158,7 @@
 #define MAX_PROP_SIZE 32
 #define MAX_RC_NAME_LEN 15
 #define MSM_PCIE_MAX_VREG 4
-#define MSM_PCIE_MAX_CLK 9
+#define MSM_PCIE_MAX_CLK 12
 #define MSM_PCIE_MAX_PIPE_CLK 1
 #define MAX_RC_NUM 3
 #define MAX_DEVICE_NUM 20
@@ -314,7 +172,7 @@
 #define PCIE_CLEAR				0xDEADBEEF
 #define PCIE_LINK_DOWN				0xFFFFFFFF
 
-#define MSM_PCIE_MAX_RESET 4
+#define MSM_PCIE_MAX_RESET 5
 #define MSM_PCIE_MAX_PIPE_RESET 1
 
 #define MSM_PCIE_MSI_PHY 0xa0000000
@@ -629,7 +487,6 @@
 	uint32_t			wr_halt_size;
 	uint32_t			cpl_timeout;
 	uint32_t			current_bdf;
-	short				current_short_bdf;
 	uint32_t			perst_delay_us_min;
 	uint32_t			perst_delay_us_max;
 	uint32_t			tlp_rd_size;
@@ -734,18 +591,21 @@
 static struct msm_pcie_reset_info_t
 msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
 	{
+		{NULL, "pcie_0_core_reset", false},
 		{NULL, "pcie_phy_reset", false},
 		{NULL, "pcie_phy_com_reset", false},
 		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
 		{NULL, "pcie_0_phy_reset", false}
 	},
 	{
+		{NULL, "pcie_1_core_reset", false},
 		{NULL, "pcie_phy_reset", false},
 		{NULL, "pcie_phy_com_reset", false},
 		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
 		{NULL, "pcie_1_phy_reset", false}
 	},
 	{
+		{NULL, "pcie_2_core_reset", false},
 		{NULL, "pcie_phy_reset", false},
 		{NULL, "pcie_phy_com_reset", false},
 		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
@@ -778,6 +638,9 @@
 	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
 	{NULL, "pcie_0_ldo", 0, false, true},
 	{NULL, "pcie_0_smmu_clk", 0, false, false},
+	{NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_phy_refgen_clk", 0, false, false},
+	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
 	{NULL, "pcie_phy_aux_clk", 0, false, false}
 	},
@@ -789,6 +652,9 @@
 	{NULL, "pcie_1_slv_axi_clk", 0, true,  true},
 	{NULL, "pcie_1_ldo", 0, false, true},
 	{NULL, "pcie_1_smmu_clk", 0, false, false},
+	{NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_phy_refgen_clk", 0, false, false},
+	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
 	{NULL, "pcie_phy_aux_clk", 0, false, false}
 	},
@@ -800,6 +666,9 @@
 	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
 	{NULL, "pcie_2_ldo", 0, false, true},
 	{NULL, "pcie_2_smmu_clk", 0, false, false},
+	{NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_phy_refgen_clk", 0, false, false},
+	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
 	{NULL, "pcie_phy_aux_clk", 0, false, false}
 	}
@@ -860,6 +729,8 @@
 	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
 };
 
+static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
+
 #ifdef CONFIG_ARM
 #define PCIE_BUS_PRIV_DATA(bus) \
 	(((struct pci_sys_data *)bus->sysdata)->private_data)
@@ -938,393 +809,9 @@
 			dev->rc_idx, info->name);
 }
 
-#if defined(CONFIG_ARCH_FSM9010)
-#define PCIE20_PARF_PHY_STTS         0x3c
-#define PCIE2_PHY_RESET_CTRL         0x44
-#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
-#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
-#define PCIE20_PARF_PCS_SWING_CTRL1  0x88
-#define PCIE20_PARF_PCS_SWING_CTRL2  0x8c
-#define PCIE20_PARF_PCS_DEEMPH1      0x74
-#define PCIE20_PARF_PCS_DEEMPH2      0x78
-#define PCIE20_PARF_PCS_DEEMPH3      0x7c
-#define PCIE20_PARF_CONFIGBITS       0x84
-#define PCIE20_PARF_PHY_CTRL3        0x94
-#define PCIE20_PARF_PCS_CTRL         0x80
-
-#define TX_AMP_VAL                   127
-#define PHY_RX0_EQ_GEN1_VAL          0
-#define PHY_RX0_EQ_GEN2_VAL          4
-#define TX_DEEMPH_GEN1_VAL           24
-#define TX_DEEMPH_GEN2_3_5DB_VAL     24
-#define TX_DEEMPH_GEN2_6DB_VAL       34
-#define PHY_TX0_TERM_OFFST_VAL       0
-
-static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
-{
-}
-
-static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
-{
-	msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
-					 BIT(0), (assert) ? 1 : 0);
-}
-
-static void pcie_phy_init(struct msm_pcie_dev_t *dev)
-{
-	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
-		dev->rc_idx);
-
-	/* De-assert Phy SW Reset */
-	pcie20_phy_reset(dev, 1);
-
-	/* Program SSP ENABLE */
-	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
-								 BIT(0), 0);
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
-								 BIT(0)) == 0)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
-								 BIT(0), 1);
-	/* Program Tx Amplitude */
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
-		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				TX_AMP_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
-			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				TX_AMP_VAL);
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
-		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				TX_AMP_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
-			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				TX_AMP_VAL);
-	/* Program De-Emphasis */
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
-			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				TX_DEEMPH_GEN2_6DB_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
-			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				TX_DEEMPH_GEN2_6DB_VAL);
-
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
-			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				TX_DEEMPH_GEN2_3_5DB_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
-			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				TX_DEEMPH_GEN2_3_5DB_VAL);
-
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
-			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				TX_DEEMPH_GEN1_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
-			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				TX_DEEMPH_GEN1_VAL);
-
-	/* Program Rx_Eq */
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
-			(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
-				 BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);
-
-	/* Program Tx0_term_offset */
-	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
-			(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
-				PHY_TX0_TERM_OFFST_VAL)
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
-			 BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-				PHY_TX0_TERM_OFFST_VAL);
-
-	/* Program REF_CLK source */
-	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
-		(dev->ext_ref_clk) ? 1 : 0);
-	/* disable Tx2Rx Loopback */
-	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
-		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
-								 BIT(1), 0);
-	/* De-assert Phy SW Reset */
-	pcie20_phy_reset(dev, 0);
-}
-
-static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
-{
-
-	/* read PCIE20_PARF_PHY_STTS twice */
-	readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
-	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
-		return false;
-	else
-		return true;
-}
-#else
-static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
-					u32 cntrl4_val, u32 cntrl5_val,
-					u32 cntrl6_val, u32 cntrl7_val)
-{
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);
-
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_TEST_CONTROL4(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_TEST_CONTROL5(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_TEST_CONTROL6(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_TEST_CONTROL7(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
-				dev->common_phy)));
-	PCIE_DUMP(dev,
-		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
-		readl_relaxed(dev->phy +
-			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
-				dev->common_phy)));
-}
-
 static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
 {
 	int i, size;
-	u32 write_val;
-
-	if (dev->phy_ver >= 0x20) {
-		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
-			dev->rc_idx);
-		return;
-	}
-
-	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);
-
-	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
-	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
-	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);
-
-	for (i = 0; i < 3; i++) {
-		write_val = 0x1 + i;
-		msm_pcie_write_reg(dev->phy,
-			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
-				dev->common_phy), write_val);
-		PCIE_DUMP(dev,
-			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
-					dev->common_phy)));
-
-		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
-	}
-
-	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
-
-	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
-		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
-		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
-		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
-
-		for (i = 0; i < 8; i += 4) {
-			write_val = 0x1 + i;
-			msm_pcie_write_reg(dev->phy,
-				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
-					dev->common_phy), write_val);
-			msm_pcie_write_reg(dev->phy,
-				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
-					dev->common_phy), write_val + 1);
-			msm_pcie_write_reg(dev->phy,
-				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
-					dev->common_phy), write_val + 2);
-			msm_pcie_write_reg(dev->phy,
-				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
-					dev->common_phy), write_val + 3);
-
-			PCIE_DUMP(dev,
-				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
-						dev->rc_idx, dev->common_phy)));
-			PCIE_DUMP(dev,
-				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
-				dev->rc_idx,
-				readl_relaxed(dev->phy +
-					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
-						dev->rc_idx, dev->common_phy)));
-		}
-
-		msm_pcie_write_reg(dev->phy,
-			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
-				dev->rc_idx, dev->common_phy), 0);
-		msm_pcie_write_reg(dev->phy,
-			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
-				dev->rc_idx, dev->common_phy), 0);
-		msm_pcie_write_reg(dev->phy,
-			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
-				dev->rc_idx, dev->common_phy), 0);
-		msm_pcie_write_reg(dev->phy,
-			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
-				dev->rc_idx, dev->common_phy), 0);
-	}
-
-	for (i = 0; i < 2; i++) {
-		write_val = 0x2 + i;
-
-		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
-			write_val);
-
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
-	}
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);
-
-	if (dev->common_phy) {
-		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
-			0x01);
-		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
-			0x02);
-		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
-			0x03);
-		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
-			0x04);
-
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_0_STATUS));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_1_STATUS));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_2_STATUS));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_3_STATUS));
-
-		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
-			0x05);
-
-		PCIE_DUMP(dev,
-			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
-		PCIE_DUMP(dev,
-			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
-			dev->rc_idx,
-			readl_relaxed(dev->phy +
-				PCIE_COM_DEBUG_BUS_0_STATUS));
-	}
 
 	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
 	for (i = 0; i < size; i += 32) {
@@ -1342,181 +829,6 @@
 	}
 }
 
-#ifdef CONFIG_ARCH_MDMCALIFORNIUM
-static void pcie_phy_init(struct msm_pcie_dev_t *dev)
-{
-	u8 common_phy;
-
-	PCIE_DBG(dev,
-		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
-		dev->rc_idx);
-
-	if (dev->common_phy)
-		common_phy = 1;
-	else
-		common_phy = 0;
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
-		0x01);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
-		0x03);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
-
-	msm_pcie_write_reg(dev->phy,
-			QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
-
-	if (dev->tcsr) {
-		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
-			dev->rc_idx, readl_relaxed(dev->tcsr));
-
-		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
-			msm_pcie_write_reg(dev->phy,
-					QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
-		else
-			msm_pcie_write_reg(dev->phy,
-					QSERDES_COM_SYSCLK_EN_SEL, 0x04);
-	}
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
-
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
-		common_phy), 0x45);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
-
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
-		0x02);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
-		0x12);
-
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
-		0x1C);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
-		0x14);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
-		0x01);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
-		0xDB);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
-		common_phy),
-		0x4B);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
-		0x04);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
-		0x04);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
-		0x04);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
-		0x40);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
-		0x40);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
-		0x40);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
-		0x73);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
-		0x99);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
-		0x15);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
-		0x0E);
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
-		0x07);
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
-		0x03);
-}
-
-static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
-{
-}
-
-static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
-{
-	if (readl_relaxed(dev->phy +
-		PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
-		return false;
-	else
-		return true;
-}
-#else
 static void pcie_phy_init(struct msm_pcie_dev_t *dev)
 {
 	int i;
@@ -1538,64 +850,6 @@
 					phy_seq->delay + 1);
 			phy_seq++;
 		}
-		return;
-	}
-
-	if (dev->common_phy)
-		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
-
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
-	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
-
-	if (dev->phy_ver == 0x3) {
-		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
-		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
-	}
-
-	if (dev->common_phy) {
-		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
-		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
 	}
 }
 
@@ -1603,18 +857,9 @@
 {
 	int i;
 	struct msm_pcie_phy_info_t *phy_seq;
-	u8 common_phy;
-
-	if (dev->phy_ver >= 0x20)
-		return;
 
 	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
 
-	if (dev->common_phy)
-		common_phy = 1;
-	else
-		common_phy = 0;
-
 	if (dev->port_phy_sequence) {
 		i =  dev->port_phy_len;
 		phy_seq = dev->port_phy_sequence;
@@ -1627,93 +872,8 @@
 					phy_seq->delay + 1);
 			phy_seq++;
 		}
-		return;
 	}
 
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
-		common_phy), 0x45);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
-		0x06);
-
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
-		0x1C);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
-		0x17);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
-		0x01);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
-		0xDB);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
-		0x18);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
-		0x04);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
-		0x04);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
-		0x4C);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
-		0x01);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
-		0x05);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
-		common_phy), 0x4B);
-	msm_pcie_write_reg(dev->phy,
-		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
-		0x14);
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
-		0x05);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
-		0x02);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
-		0xA3);
-
-	if (dev->phy_ver == 0x3) {
-		msm_pcie_write_reg(dev->phy,
-			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
-			0x19);
-
-		msm_pcie_write_reg(dev->phy,
-			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
-			0x0E);
-	}
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
-		0x03);
-	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);
-
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
-		0x00);
-	msm_pcie_write_reg(dev->phy,
-		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
-		0x0A);
 }
 
 static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
@@ -1732,8 +892,6 @@
 	else
 		return true;
 }
-#endif
-#endif
 
 static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
 {
@@ -1975,8 +1133,6 @@
 		dev->msi_gicm_base);
 	PCIE_DBG_FS(dev, "bus_client: %d\n",
 		dev->bus_client);
-	PCIE_DBG_FS(dev, "current short bdf: %d\n",
-		dev->current_short_bdf);
 	PCIE_DBG_FS(dev, "smmu does %s exist\n",
 		dev->smmu_exist ? "" : "not");
 	PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
@@ -3512,8 +2668,8 @@
 						dev->rc_idx,
 						dev->vreg[i].name);
 					regulator_set_voltage(hdl,
-						RPM_REGULATOR_CORNER_NONE,
-						INT_MAX);
+						RPMH_REGULATOR_LEVEL_OFF,
+						RPMH_REGULATOR_LEVEL_MAX);
 				}
 			}
 
@@ -3542,8 +2698,8 @@
 					dev->rc_idx,
 					dev->vreg[i].name);
 				regulator_set_voltage(dev->vreg[i].hdl,
-					RPM_REGULATOR_CORNER_NONE,
-					INT_MAX);
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
 			}
 		}
 	}
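The two hunks above change the voltage request made while a rail's vote is being dropped: instead of the RPM corner constants, the RPMh level macros are used, widening the request to the full OFF..MAX window so this client no longer constrains the rail before it is released. A minimal sketch of that release pattern, assuming a consumer handle obtained with devm_regulator_get() and that the RPMH_REGULATOR_LEVEL_* macros come from dt-bindings/regulator/qcom,rpmh-regulator.h (the header path and helper name are assumptions; only the macro names appear in this patch):

#include <linux/printk.h>
#include <linux/regulator/consumer.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>

/* Hypothetical helper: stop constraining a corner-managed rail before
 * dropping its enable vote.
 */
static void example_release_rail(struct regulator *vreg)
{
	/* Widen the level request so this client no longer pins the rail. */
	if (regulator_set_voltage(vreg, RPMH_REGULATOR_LEVEL_OFF,
				  RPMH_REGULATOR_LEVEL_MAX))
		pr_warn("example: failed to relax level request\n");

	regulator_disable(vreg);
}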
@@ -3645,6 +2801,19 @@
 	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
 		reset_info = &dev->reset[i];
 		if (reset_info->hdl) {
+			rc = reset_control_assert(reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to assert reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully asserted reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+
+			/* add a 1ms delay to ensure the reset is asserted */
+			usleep_range(1000, 1005);
+
 			rc = reset_control_deassert(reset_info->hdl);
 			if (rc)
 				PCIE_ERR(dev,
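The new code above asserts each reset, holds it for about a millisecond, and only then de-asserts it, so the controller sees a full reset pulse rather than a bare de-assert on a line that may already be released. A minimal sketch of that assert–delay–de-assert pattern, assuming a handle obtained earlier via devm_reset_control_get() (the helper name is hypothetical):

#include <linux/reset.h>
#include <linux/delay.h>

/* Hypothetical helper: pulse one reset line for at least 1 ms. */
static int example_pulse_reset(struct reset_control *rst)
{
	int ret;

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	/* Hold the block in reset long enough for the assert to latch. */
	usleep_range(1000, 1005);

	return reset_control_deassert(rst);
}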
@@ -3749,6 +2918,19 @@
 	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
 		pipe_reset_info = &dev->pipe_reset[i];
 		if (pipe_reset_info->hdl) {
+			rc = reset_control_assert(pipe_reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to assert pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully asserted pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+
+			/* add a 1ms delay to ensure the reset is asserted */
+			usleep_range(1000, 1005);
+
 			rc = reset_control_deassert(
 					pipe_reset_info->hdl);
 			if (rc)
@@ -3802,8 +2984,6 @@
 
 static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
 {
-	int i;
-
 	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
 
 	/*
@@ -3859,27 +3039,6 @@
 		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
 			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
 	}
-
-	/* configure SMMU registers */
-	if (dev->smmu_exist) {
-		msm_pcie_write_reg(dev->parf,
-			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
-		msm_pcie_write_reg(dev->parf,
-			PCIE20_PARF_SID_OFFSET, 0);
-
-		if (dev->enumerated) {
-			for (i = 0; i < MAX_DEVICE_NUM; i++) {
-				if (dev->pcidev_table[i].dev &&
-					dev->pcidev_table[i].short_bdf) {
-					msm_pcie_write_reg(dev->parf,
-						PCIE20_PARF_BDF_TRANSLATE_N +
-						dev->pcidev_table[i].short_bdf
-						* 4,
-						dev->pcidev_table[i].bdf >> 16);
-				}
-			}
-		}
-	}
 }
 
 static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
@@ -4527,6 +3686,13 @@
 		msm_pcie_restore_sec_config(dev);
 	}
 
+	/* configure PCIe to RC mode */
+	msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);
+
+	/* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
+	if (dev->l1_supported)
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+
 	/* enable PCIe clocks and resets */
 	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
 
@@ -4688,6 +3854,9 @@
 
 	msm_pcie_config_link_state(dev);
 
+	if (dev->enumerated)
+		pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);
+
 	dev->link_status = MSM_PCIE_LINK_ENABLED;
 	dev->power_on = true;
 	dev->suspending = false;
@@ -4935,106 +4104,41 @@
 	return ret;
 }
 
-int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
+static void msm_pcie_configure_sid(struct msm_pcie_dev_t *pcie_dev,
+				struct pci_dev *dev)
 {
-	struct pci_dev *pcidev;
-	struct msm_pcie_dev_t *pcie_dev;
-	struct pci_bus *bus;
-	int i;
+	u32 offset;
+	u32 sid;
 	u32 bdf;
+	int ret;
 
-	if (!dev) {
-		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
-			__func__);
-		return MSM_PCIE_ERROR;
-	}
-
-	pcidev = to_pci_dev(dev);
-	if (!pcidev) {
-		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
-			__func__);
-		return MSM_PCIE_ERROR;
-	}
-
-	bus = pcidev->bus;
-	if (!bus) {
-		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
-			__func__);
-		return MSM_PCIE_ERROR;
-	}
-
-	while (!pci_is_root_bus(bus))
-		bus = bus->parent;
-
-	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
-	if (!pcie_dev) {
-		pr_err("%s: PCIe: Could not get PCIe structure\n",
-			__func__);
-		return MSM_PCIE_ERROR;
-	}
-
-	if (!pcie_dev->smmu_exist) {
+	ret = iommu_fwspec_get_id(&dev->dev, &sid);
+	if (ret) {
 		PCIE_DBG(pcie_dev,
-			"PCIe: RC:%d: smmu does not exist\n",
+			"PCIe: RC%d: Device does not have a SID\n",
 			pcie_dev->rc_idx);
-		return MSM_PCIE_ERROR;
-	}
-
-	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
-		pcie_dev->rc_idx, dev);
-	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
-		pcie_dev->rc_idx, pcidev);
-
-	*domain = pcie_dev->rc_idx;
-
-	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
-		pcie_dev->current_short_bdf++;
-	} else {
-		PCIE_ERR(pcie_dev,
-			"PCIe: RC%d: No more short BDF left\n",
-			pcie_dev->rc_idx);
-		return MSM_PCIE_ERROR;
-	}
-
-	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
-
-	for (i = 0; i < MAX_DEVICE_NUM; i++) {
-		if (pcie_dev->pcidev_table[i].bdf == bdf) {
-			*sid = pcie_dev->smmu_sid_base +
-				((pcie_dev->rc_idx << 4) |
-				pcie_dev->current_short_bdf);
-
-			msm_pcie_write_reg(pcie_dev->parf,
-				PCIE20_PARF_BDF_TRANSLATE_N +
-				pcie_dev->current_short_bdf * 4,
-				bdf >> 16);
-
-			pcie_dev->pcidev_table[i].sid = *sid;
-			pcie_dev->pcidev_table[i].short_bdf =
-				pcie_dev->current_short_bdf;
-			break;
-		}
-	}
-
-	if (i == MAX_DEVICE_NUM) {
-		pcie_dev->current_short_bdf--;
-		PCIE_ERR(pcie_dev,
-			"PCIe: RC%d could not find BDF:%d\n",
-			pcie_dev->rc_idx, bdf);
-		return MSM_PCIE_ERROR;
+		return;
 	}
 
 	PCIE_DBG(pcie_dev,
-		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
-		pcie_dev->rc_idx,
-		bdf >> 24,
-		bdf >> 19 & 0x1f,
-		bdf >> 16 & 0x07,
-		*sid);
+		"PCIe: RC%d: Device SID: 0x%x\n",
+		pcie_dev->rc_idx, sid);
 
-	return 0;
+	bdf = BDF_OFFSET(dev->bus->number, dev->devfn);
+	offset = (sid - pcie_dev->smmu_sid_base) * 4;
+
+	if (offset >= MAX_SHORT_BDF_NUM * 4) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: Invalid SID offset: 0x%x. Should be less than 0x%x\n",
+			pcie_dev->rc_idx, offset, MAX_SHORT_BDF_NUM * 4);
+		return;
+	}
+
+	msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
+	msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_SID_OFFSET, 0);
+	msm_pcie_write_reg(pcie_dev->parf,
+		PCIE20_PARF_BDF_TRANSLATE_N + offset, bdf >> 16);
 }
-EXPORT_SYMBOL(msm_pcie_configure_sid);
 
 int msm_pcie_enumerate(u32 rc_idx)
 {
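The rewritten msm_pcie_configure_sid() above no longer hands out short BDFs itself; it takes the SID the IOMMU layer already assigned (via iommu_fwspec_get_id()), turns its distance from smmu_sid_base into a PARF BDF_TRANSLATE_N slot, and programs that slot with the upper 16 bits of the BDF. A worked example of the arithmetic with purely hypothetical values, assuming BDF_OFFSET() packs the bus number in bits 31:24 and devfn in bits 23:16, as the removed debug print implies:

#include <linux/types.h>

/* Worked example, hypothetical values: SID 0x1c01 against a base of 0x1c00
 * for an endpoint at 01:00.0 (bus 1, devfn 0).
 */
static void example_sid_to_bdf_slot(void)
{
	u32 smmu_sid_base = 0x1c00;
	u32 sid = 0x1c01;			/* from iommu_fwspec_get_id() */
	u32 bdf = (1 << 24) | (0 << 16);	/* bus 1, devfn 0 -> 0x01000000 */
	u32 offset = (sid - smmu_sid_base) * 4;	/* 0x4 -> second translate slot */
	u32 val = bdf >> 16;			/* 0x0100 goes into BDF_TRANSLATE_N + 0x4 */

	(void)offset;
	(void)val;
}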
@@ -6138,6 +5242,28 @@
 		disable_irq(dev->wake_n);
 }
 
+static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
+	u8 busnr = dev->bus->number;
+	u8 slot = PCI_SLOT(dev->devfn);
+	u8 func = PCI_FUNC(dev->devfn);
+
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
+		pcie_dev->rc_idx, busnr, slot, func);
+
+	msm_pcie_configure_sid(pcie_dev, dev);
+
+	return 0;
+}
+
+/* Hook to setup PCI device during PCI framework scan */
+int pcibios_add_device(struct pci_dev *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	return msm_pcie_config_device(dev, pcie_dev);
+}
 
 static int msm_pcie_probe(struct platform_device *pdev)
 {
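Two callers now reach msm_pcie_config_device(): the pcibios_add_device() override covers each device as the PCI core adds it during scan, and the pci_walk_bus() call added in the link-enable path re-applies the per-device setup when an already enumerated bus is powered back up. A minimal sketch of the walk-the-bus side, with a hypothetical callback standing in for the driver's own:

#include <linux/pci.h>

/* Hypothetical per-device callback; returning 0 keeps the walk going. */
static int example_cb(struct pci_dev *pdev, void *data)
{
	dev_info(&pdev->dev, "reconfiguring %02x:%02x.%d\n",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	return 0;
}

/* Re-apply per-device settings to every device below 'root_bus'. */
static void example_reconfigure(struct pci_bus *root_bus, void *driver_data)
{
	pci_walk_bus(root_bus, example_cb, driver_data);
}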
@@ -6413,7 +5539,6 @@
 	msm_pcie_dev[rc_idx].wake_counter = 0;
 	msm_pcie_dev[rc_idx].aer_enable = true;
 	msm_pcie_dev[rc_idx].power_on = false;
-	msm_pcie_dev[rc_idx].current_short_bdf = 0;
 	msm_pcie_dev[rc_idx].use_msi = false;
 	msm_pcie_dev[rc_idx].use_pinctrl = false;
 	msm_pcie_dev[rc_idx].linkdown_panic = false;
@@ -6764,12 +5889,12 @@
 		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
 			pcie_dev->rc_idx);
 
-		msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
-
 	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
 		pinctrl_select_state(pcie_dev->pinctrl,
 					pcie_dev->pins_sleep);
 
+	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
 	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
 
 	return ret;
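The final pci-msm.c hunk reorders the suspend tail: the pins are moved to their sleep state while the port is still powered, and only afterwards are the pipe clock, core clocks and regulator votes dropped. A minimal sketch of that ordering, with the power-down step left as a placeholder (names are hypothetical):

#include <linux/pinctrl/consumer.h>

/* Hypothetical suspend tail: park the pins first, then cut power. */
static void example_suspend_tail(struct pinctrl *pctrl,
				 struct pinctrl_state *pins_sleep)
{
	if (pctrl && pins_sleep)
		pinctrl_select_state(pctrl, pins_sleep);

	/* ...then gate clocks and drop regulator votes (placeholder). */
}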
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 67adf58..30c31a8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -526,7 +526,6 @@
 	msm_mux_reserved30,
 	msm_mux_qup11,
 	msm_mux_qup14,
-	msm_mux_phase_flag3,
 	msm_mux_reserved96,
 	msm_mux_ldo_en,
 	msm_mux_reserved97,
@@ -543,17 +542,13 @@
 	msm_mux_phase_flag5,
 	msm_mux_reserved103,
 	msm_mux_reserved104,
-	msm_mux_pcie1_forceon,
 	msm_mux_uim2_data,
 	msm_mux_qup13,
 	msm_mux_reserved105,
-	msm_mux_pcie1_pwren,
 	msm_mux_uim2_clk,
 	msm_mux_reserved106,
-	msm_mux_pcie1_auxen,
 	msm_mux_uim2_reset,
 	msm_mux_reserved107,
-	msm_mux_pcie1_button,
 	msm_mux_uim2_present,
 	msm_mux_reserved108,
 	msm_mux_uim1_data,
@@ -564,7 +559,6 @@
 	msm_mux_reserved111,
 	msm_mux_uim1_present,
 	msm_mux_reserved112,
-	msm_mux_pcie1_prsnt2,
 	msm_mux_uim_batt,
 	msm_mux_edp_hot,
 	msm_mux_reserved113,
@@ -587,7 +581,6 @@
 	msm_mux_reserved123,
 	msm_mux_reserved124,
 	msm_mux_reserved125,
-	msm_mux_sd_card,
 	msm_mux_reserved126,
 	msm_mux_reserved127,
 	msm_mux_reserved128,
@@ -647,7 +640,6 @@
 	msm_mux_reserved42,
 	msm_mux_reserved43,
 	msm_mux_reserved44,
-	msm_mux_bt_reset,
 	msm_mux_qup6,
 	msm_mux_reserved45,
 	msm_mux_reserved46,
@@ -672,7 +664,6 @@
 	msm_mux_gcc_gp1,
 	msm_mux_phase_flag18,
 	msm_mux_reserved57,
-	msm_mux_ssc_irq,
 	msm_mux_phase_flag19,
 	msm_mux_reserved58,
 	msm_mux_phase_flag20,
@@ -731,10 +722,8 @@
 	msm_mux_reserved82,
 	msm_mux_reserved83,
 	msm_mux_reserved84,
-	msm_mux_pcie1_pwrfault,
 	msm_mux_qup5,
 	msm_mux_reserved85,
-	msm_mux_pcie1_mrl,
 	msm_mux_reserved86,
 	msm_mux_reserved87,
 	msm_mux_reserved88,
@@ -772,6 +761,7 @@
 	msm_mux_reserved95,
 	msm_mux_tsif2_sync,
 	msm_mux_sdc40,
+	msm_mux_phase_flag3,
 	msm_mux_NA,
 };
 
@@ -781,19 +771,24 @@
 	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
 	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
 	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
-	"gpio36", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
-	"gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50", "gpio51",
-	"gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57", "gpio64",
-	"gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
-	"gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio81",
-	"gpio82", "gpio83", "gpio84", "gpio87", "gpio88", "gpio89", "gpio90",
-	"gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
-	"gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
-	"gpio109", "gpio110", "gpio111", "gpio112", "gpio114", "gpio115",
-	"gpio116", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
-	"gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
-	"gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
-	"gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149",
 };
 static const char * const qup0_groups[] = {
 	"gpio0", "gpio1", "gpio2", "gpio3",
@@ -1075,9 +1070,6 @@
 static const char * const qup14_groups[] = {
 	"gpio31", "gpio32", "gpio33", "gpio34",
 };
-static const char * const phase_flag3_groups[] = {
-	"gpio96",
-};
 static const char * const reserved96_groups[] = {
 	"gpio96",
 };
@@ -1109,7 +1101,7 @@
 	"gpio101",
 };
 static const char * const pci_e1_groups[] = {
-	"gpio102", "gpio103", "gpio104",
+	"gpio102", "gpio103",
 };
 static const char * const prng_rosc_groups[] = {
 	"gpio102",
@@ -1126,9 +1118,6 @@
 static const char * const reserved104_groups[] = {
 	"gpio104",
 };
-static const char * const pcie1_forceon_groups[] = {
-	"gpio105",
-};
 static const char * const uim2_data_groups[] = {
 	"gpio105",
 };
@@ -1138,27 +1127,18 @@
 static const char * const reserved105_groups[] = {
 	"gpio105",
 };
-static const char * const pcie1_pwren_groups[] = {
-	"gpio106",
-};
 static const char * const uim2_clk_groups[] = {
 	"gpio106",
 };
 static const char * const reserved106_groups[] = {
 	"gpio106",
 };
-static const char * const pcie1_auxen_groups[] = {
-	"gpio107",
-};
 static const char * const uim2_reset_groups[] = {
 	"gpio107",
 };
 static const char * const reserved107_groups[] = {
 	"gpio107",
 };
-static const char * const pcie1_button_groups[] = {
-	"gpio108",
-};
 static const char * const uim2_present_groups[] = {
 	"gpio108",
 };
@@ -1189,9 +1169,6 @@
 static const char * const reserved112_groups[] = {
 	"gpio112",
 };
-static const char * const pcie1_prsnt2_groups[] = {
-	"gpio113",
-};
 static const char * const uim_batt_groups[] = {
 	"gpio113",
 };
@@ -1259,9 +1236,6 @@
 static const char * const reserved125_groups[] = {
 	"gpio125",
 };
-static const char * const sd_card_groups[] = {
-	"gpio126",
-};
 static const char * const reserved126_groups[] = {
 	"gpio126",
 };
@@ -1380,7 +1354,7 @@
 	"gpio34",
 };
 static const char * const pci_e0_groups[] = {
-	"gpio35", "gpio36", "gpio37",
+	"gpio35", "gpio36",
 };
 static const char * const jitter_bist_groups[] = {
 	"gpio35",
@@ -1439,9 +1413,6 @@
 static const char * const reserved44_groups[] = {
 	"gpio44",
 };
-static const char * const bt_reset_groups[] = {
-	"gpio45",
-};
 static const char * const qup6_groups[] = {
 	"gpio45", "gpio46", "gpio47", "gpio48",
 };
@@ -1514,11 +1485,6 @@
 static const char * const reserved57_groups[] = {
 	"gpio57",
 };
-static const char * const ssc_irq_groups[] = {
-	"gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio78",
-	"gpio79", "gpio80", "gpio117", "gpio118", "gpio119", "gpio120",
-	"gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
-};
 static const char * const phase_flag19_groups[] = {
 	"gpio58",
 };
@@ -1693,18 +1659,12 @@
 static const char * const reserved84_groups[] = {
 	"gpio84",
 };
-static const char * const pcie1_pwrfault_groups[] = {
-	"gpio85",
-};
 static const char * const qup5_groups[] = {
 	"gpio85", "gpio86", "gpio87", "gpio88",
 };
 static const char * const reserved85_groups[] = {
 	"gpio85",
 };
-static const char * const pcie1_mrl_groups[] = {
-	"gpio86",
-};
 static const char * const reserved86_groups[] = {
 	"gpio86",
 };
@@ -1816,6 +1776,9 @@
 static const char * const sdc40_groups[] = {
 	"gpio96",
 };
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
 
 static const struct msm_function sdm845_functions[] = {
 	FUNCTION(gpio),
@@ -1912,7 +1875,6 @@
 	FUNCTION(reserved30),
 	FUNCTION(qup11),
 	FUNCTION(qup14),
-	FUNCTION(phase_flag3),
 	FUNCTION(reserved96),
 	FUNCTION(ldo_en),
 	FUNCTION(reserved97),
@@ -1929,17 +1891,13 @@
 	FUNCTION(phase_flag5),
 	FUNCTION(reserved103),
 	FUNCTION(reserved104),
-	FUNCTION(pcie1_forceon),
 	FUNCTION(uim2_data),
 	FUNCTION(qup13),
 	FUNCTION(reserved105),
-	FUNCTION(pcie1_pwren),
 	FUNCTION(uim2_clk),
 	FUNCTION(reserved106),
-	FUNCTION(pcie1_auxen),
 	FUNCTION(uim2_reset),
 	FUNCTION(reserved107),
-	FUNCTION(pcie1_button),
 	FUNCTION(uim2_present),
 	FUNCTION(reserved108),
 	FUNCTION(uim1_data),
@@ -1950,7 +1908,6 @@
 	FUNCTION(reserved111),
 	FUNCTION(uim1_present),
 	FUNCTION(reserved112),
-	FUNCTION(pcie1_prsnt2),
 	FUNCTION(uim_batt),
 	FUNCTION(edp_hot),
 	FUNCTION(reserved113),
@@ -1973,7 +1930,6 @@
 	FUNCTION(reserved123),
 	FUNCTION(reserved124),
 	FUNCTION(reserved125),
-	FUNCTION(sd_card),
 	FUNCTION(reserved126),
 	FUNCTION(reserved127),
 	FUNCTION(reserved128),
@@ -2033,7 +1989,6 @@
 	FUNCTION(reserved42),
 	FUNCTION(reserved43),
 	FUNCTION(reserved44),
-	FUNCTION(bt_reset),
 	FUNCTION(qup6),
 	FUNCTION(reserved45),
 	FUNCTION(reserved46),
@@ -2058,7 +2013,6 @@
 	FUNCTION(gcc_gp1),
 	FUNCTION(phase_flag18),
 	FUNCTION(reserved57),
-	FUNCTION(ssc_irq),
 	FUNCTION(phase_flag19),
 	FUNCTION(reserved58),
 	FUNCTION(phase_flag20),
@@ -2117,10 +2071,8 @@
 	FUNCTION(reserved82),
 	FUNCTION(reserved83),
 	FUNCTION(reserved84),
-	FUNCTION(pcie1_pwrfault),
 	FUNCTION(qup5),
 	FUNCTION(reserved85),
-	FUNCTION(pcie1_mrl),
 	FUNCTION(reserved86),
 	FUNCTION(reserved87),
 	FUNCTION(reserved88),
@@ -2158,6 +2110,7 @@
 	FUNCTION(reserved95),
 	FUNCTION(tsif2_sync),
 	FUNCTION(sdc40),
+	FUNCTION(phase_flag3),
 };
 
 static const struct msm_pingroup sdm845_groups[] = {
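The pinctrl-sdm845.c edits all follow the driver's two-table pattern: every mux function has a <name>_groups[] array naming the pads that can select it and a matching FUNCTION(<name>) entry in sdm845_functions[], so deleting pcie1_*, ssc_irq, sd_card and bt_reset (and re-adding phase_flag3 at the end) touches both tables in step. A schematic sketch of that pairing, with a hypothetical function name and FUNCTION() taken to be the driver's usual wrapper for building a struct msm_function:

/* Pads that can mux to the (hypothetical) function... */
static const char * const example_fn_groups[] = {
	"gpio10", "gpio11",
};

/* ...and the function-table entry that the per-pad PINGROUP()
 * definitions refer back to.
 */
static const struct msm_function example_soc_functions[] = {
	FUNCTION(example_fn),
};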
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index af4d4c8..f5d8227 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -3546,16 +3546,32 @@
 	ipa_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
 	if (ipa_ctx->ipa_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with unlocked lock to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa_ctx->ipa_active_clients.cnt++;
+		ipa2_active_clients_log_inc(&log_info, false);
+		ipa_active_clients_unlock();
+
 		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
 		if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
 			if (msm_bus_scale_client_update_request(
 			    ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
 				WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG("clocks are gated, not setting rate\n");
+		ipa_active_clients_unlock();
 	}
-	ipa_active_clients_unlock();
 	IPADBG("Done\n");
+
 	return 0;
 }
 
@@ -3679,6 +3695,7 @@
 				 * pipe will be unsuspended as part of
 				 * enabling IPA clocks
 				 */
+				mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
 				if (!atomic_read(
 					&ipa_ctx->sps_pm.dec_clients)
 					) {
@@ -3691,6 +3708,7 @@
 						1);
 					ipa_sps_process_irq_schedule_rel();
 				}
+				mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
 			} else {
 				resource = ipa2_get_rm_resource_from_ep(i);
 				res = ipa_rm_request_resource_with_timer(
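The ipa.c hunk above fixes the locking problem called out in its comment: clk_set_rate() may block, and while it runs other clients must still be able to take an IPA clock reference, so the active-clients lock cannot be held across it. The fix grabs one extra reference while the lock is still held, drops the lock, performs the rate change and bus vote, and then releases the extra reference (which in the real driver may also gate the clocks). A reduced sketch of that pin-then-unlock shape, with hypothetical names:

#include <linux/clk.h>
#include <linux/mutex.h>

struct example_ctx {
	struct mutex lock;
	int active_refs;
	struct clk *clk;
};

static void example_set_rate(struct example_ctx *ctx, unsigned long rate)
{
	mutex_lock(&ctx->lock);
	if (ctx->active_refs > 0) {
		/* Pin the clock on by taking our own reference... */
		ctx->active_refs++;
		mutex_unlock(&ctx->lock);

		/* ...so the slow call runs without the lock held. */
		clk_set_rate(ctx->clk, rate);

		mutex_lock(&ctx->lock);
		ctx->active_refs--;	/* drop the temporary reference */
	}
	mutex_unlock(&ctx->lock);
}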
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index e474a40..b60c7a6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1480,17 +1480,24 @@
 
 void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
 {
+	struct ipa_flt_tbl *tbl;
 	struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
 
 	mutex_lock(&ipa_ctx->lock);
 	if (ep->dflt_flt4_rule_hdl) {
+		tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
 		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
 		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt4_rule_hdl = 0;
 	}
 	if (ep->dflt_flt6_rule_hdl) {
+		tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
 		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
 		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt6_rule_hdl = 0;
 	}
 	mutex_unlock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index cd575fe..5568f8b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,6 +51,7 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_BAM_STOP_MAX_RETRY 10
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 01eea36..9a3c146 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -591,6 +591,7 @@
 {
 	int index;
 	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	int retries = 0;
 
 	mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
 
@@ -600,6 +601,7 @@
 		return -EBADF;
 	}
 
+send_cmd:
 	init_completion(&ipa_ctx->uc_ctx.uc_completion);
 
 	ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
@@ -659,6 +661,19 @@
 	}
 
 	if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+		if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR ==
+			ipa_ctx->uc_ctx.uc_status) {
+			retries++;
+			if (retries == IPA_BAM_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+			} else {
+				/* sleep for short period to flush IPA */
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+				goto send_cmd;
+			}
+		}
+
 		IPAERR("Recevied status %u, Expected status %u\n",
 			ipa_ctx->uc_ctx.uc_status, expected_status);
 		ipa_ctx->uc_ctx.pending_cmd = -1;
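The surrounding ipa_uc.c change turns a one-shot command into a bounded retry: when the microcontroller answers with the WDI RX FSM transition error, the driver sleeps briefly to let IPA flush and re-issues the command, giving up after IPA_BAM_STOP_MAX_RETRY attempts. The same shape reduced to a sketch, with hypothetical names and limits:

#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_RETRY	10

/* Hypothetical: issue a command that may transiently fail while HW drains. */
static int example_send_with_retry(int (*send_once)(void))
{
	int retries = 0;
	int status;

	for (;;) {
		status = send_once();
		if (status == 0)
			return 0;

		if (++retries == EXAMPLE_MAX_RETRY)
			return -EIO;

		/* Give the hardware a short window to flush before retrying. */
		usleep_range(1000, 1200);
	}
}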
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
index 3bec471..a98d602 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -441,7 +441,7 @@
 
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status -  Values that represent
  * offload related command response status to be sent to CPU.
  */
 enum ipa_hw_2_cpu_offload_cmd_resp_status {
@@ -478,6 +478,47 @@
 };
 
 /**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
  * struct IpaHwSetUpCmd  -
  *
  *
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 128674a..b7815cb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -111,47 +111,6 @@
 };
 
 /**
- * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
- * command response status to be sent to CPU.
- */
-enum ipa_hw_2_cpu_cmd_resp_status {
-	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
-	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
-	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
-	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
-	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
-	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
-	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
-	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
-	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
-	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
-	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
-	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
-	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
-	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
-	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
-	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
-	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
-};
-
-/**
  * enum ipa_hw_wdi_errors - WDI specific error types.
  * @IPA_HW_WDI_ERROR_NONE : No error persists
  * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 83fd2b2..e7b16b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1884,7 +1884,7 @@
 			if (ep_idx == -1)
 				continue;
 
-			gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+			gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
 			if (!gsi_ep_cfg) {
 				IPAERR("failed to get GSI config\n");
 				ipa_assert();
@@ -2362,6 +2362,7 @@
 void ipa3_q6_post_shutdown_cleanup(void)
 {
 	int client_idx;
+	int ep_idx;
 
 	IPADBG_LOW("ENTER\n");
 
@@ -2378,6 +2379,10 @@
 
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
 			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
 				IPAERR("fail to validate Q6 ch emptiness %d\n",
 					client_idx);
@@ -3569,16 +3574,32 @@
 	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
 	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+		struct ipa_active_client_logging_info log_info;
+
+		/*
+		 * clk_set_rate should be called with unlocked lock to allow
+		 * clients to get a reference to IPA clock synchronously.
+		 * Hold a reference to IPA clock here to make sure clock
+		 * state does not change during set_rate.
+		 */
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+		ipa3_ctx->ipa3_active_clients.cnt++;
+		ipa3_active_clients_log_inc(&log_info, false);
+		ipa3_active_clients_unlock();
+
 		if (ipa3_clk)
 			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
 		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
 			ipa3_get_bus_vote()))
 			WARN_ON(1);
+		/* remove the vote added here */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	} else {
 		IPADBG_LOW("clocks are gated, not setting rate\n");
+		ipa3_active_clients_unlock();
 	}
-	ipa3_active_clients_unlock();
 	IPADBG_LOW("Done\n");
+
 	return 0;
 }
 
@@ -3620,6 +3641,8 @@
 				 * pipe will be unsuspended as part of
 				 * enabling IPA clocks
 				 */
+				mutex_lock(&ipa3_ctx->transport_pm.
+					transport_pm_mutex);
 				if (!atomic_read(
 					&ipa3_ctx->transport_pm.dec_clients)
 					) {
@@ -3632,6 +3655,8 @@
 					1);
 					ipa3_process_irq_schedule_rel();
 				}
+				mutex_unlock(&ipa3_ctx->transport_pm.
+					transport_pm_mutex);
 			} else {
 				resource = ipa3_get_rm_resource_from_ep(i);
 				res =
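As in the ipa_v2 copy of this code, the suspend-interrupt handler checks dec_clients and, if it is clear, sets it and schedules the delayed clock release; the mutex added around that block serializes the check-then-set against the release worker, which takes the same transport_pm mutex, so the two paths can no longer interleave. A sketch of the shape with hypothetical names:

#include <linux/atomic.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_pm_lock);
static atomic_t example_release_pending = ATOMIC_INIT(0);

extern void example_schedule_release(void);

static void example_on_suspend_irq(void)
{
	/* The release worker takes the same mutex, so the check-then-set
	 * below cannot interleave with it.
	 */
	mutex_lock(&example_pm_lock);
	if (!atomic_read(&example_release_pending)) {
		atomic_set(&example_release_pending, 1);
		example_schedule_release();
	}
	mutex_unlock(&example_pm_lock);
}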
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 796103f..0b8115f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -239,6 +239,8 @@
 	struct gsi_xfer_elem xfer_elem;
 	int i;
 	int aggr_active_bitmap = 0;
+	bool pipe_suspended = false;
+	struct ipa_ep_cfg_ctrl ctrl;
 
 	IPADBG("Applying reset channel with open aggregation frame WA\n");
 	ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
@@ -265,6 +267,15 @@
 	if (result)
 		return -EFAULT;
 
+	ipahal_read_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, &ctrl);
+	if (ctrl.ipa_ep_suspend) {
+		IPADBG("pipe is suspended, remove suspend\n");
+		pipe_suspended = true;
+		ctrl.ipa_ep_suspend = false;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
+
 	/* Start channel and put 1 Byte descriptor on it */
 	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
 	if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -324,6 +335,13 @@
 	 */
 	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
 
+	if (pipe_suspended) {
+		IPADBG("suspend the pipe again\n");
+		ctrl.ipa_ep_suspend = true;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
+
 	/* Restore channels properties */
 	result = ipa3_restore_channel_properties(ep, &orig_chan_props,
 		&orig_chan_scratch);
@@ -338,6 +356,12 @@
 	ipa3_stop_gsi_channel(clnt_hdl);
 	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
 start_chan_fail:
+	if (pipe_suspended) {
+		IPADBG("suspend the pipe again\n");
+		ctrl.ipa_ep_suspend = true;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+			clnt_hdl, &ctrl);
+	}
 	ipa3_restore_channel_properties(ep, &orig_chan_props,
 		&orig_chan_scratch);
 restore_props_fail:
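The ipa_client.c hunks bracket the open-aggregation-frame reset workaround with a save/restore of the endpoint suspend bit: a suspended pipe is temporarily unsuspended so the channel can be started for the one-byte transfer, and the suspend is put back both on the normal path and on the start_chan_fail error path. A minimal sketch of that save/restore shape, with hypothetical accessors standing in for the IPA_ENDP_INIT_CTRL_n reads and writes:

#include <linux/types.h>

/* Hypothetical accessors for the endpoint suspend bit and the workaround. */
extern bool example_ep_is_suspended(int ep);
extern void example_ep_set_suspend(int ep, bool suspend);
extern int example_do_reset_workaround(int ep);

static int example_reset_with_suspend_save(int ep)
{
	bool was_suspended = example_ep_is_suspended(ep);
	int ret;

	if (was_suspended)
		example_ep_set_suspend(ep, false);	/* lift suspend for the WA */

	ret = example_do_reset_workaround(ep);

	if (was_suspended)
		example_ep_set_suspend(ep, true);	/* restore on success and failure */

	return ret;
}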
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9d25e4a..faa47d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1004,6 +1004,7 @@
 	struct ipa3_ep_context *ep;
 	int empty;
 	int result;
+	int i;
 
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1038,13 +1039,23 @@
 	if (IPA_CLIENT_IS_CONS(ep->client))
 		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
 	flush_workqueue(ep->sys->wq);
-	result = ipa3_stop_gsi_channel(clnt_hdl);
+	/* channel stop might fail on timeout if IPA is busy */
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result == GSI_STATUS_SUCCESS)
+			break;
+
+		if (result != -GSI_STATUS_AGAIN &&
+		    result != -GSI_STATUS_TIMED_OUT)
+			break;
+	}
+
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("GSI stop chan err: %d.\n", result);
 		ipa_assert();
 		return result;
 	}
-	result = gsi_reset_channel(ep->gsi_chan_hdl);
+	result = ipa3_reset_gsi_channel(clnt_hdl);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("Failed to reset chan: %d.\n", result);
 		ipa_assert();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 0cc1206..ff763c4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1389,16 +1389,23 @@
 void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
 {
 	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+	struct ipa3_flt_tbl *tbl;
 
 	mutex_lock(&ipa3_ctx->lock);
 	if (ep->dflt_flt4_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
 		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
 		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt4_rule_hdl = 0;
 	}
 	if (ep->dflt_flt6_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
 		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
 		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt6_rule_hdl = 0;
 	}
 	mutex_unlock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 19c3de4a..73738bf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -53,6 +53,8 @@
 static bool workqueues_stopped;
 static bool ipa3_modem_init_cmplt;
 static bool first_time_handshake;
+struct mutex ipa3_qmi_lock;
+
 /* QMI A5 service */
 
 static struct msg_desc ipa3_indication_reg_req_desc = {
@@ -610,12 +612,17 @@
 		req->filter_spec_ex_list_len);
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
-		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
-			req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
@@ -655,12 +662,17 @@
 		req->filter_spec_ex_list_len);
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
-		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
-		req, sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
-	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len =
 		QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01;
@@ -796,12 +808,17 @@
 		return -EINVAL;
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
-		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
-		req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
-	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
-	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len =
 	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
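
The three caching hunks above keep the last ten QMI filter-rule requests in a fixed array whose write index wraps with a modulo, and they now take ipa3_qmi_lock and re-check that ipa3_qmi_ctx is still valid before touching the cache. The standalone sketch below mirrors that pattern with a pthread mutex; struct fltr_req, ctx_valid, and the other names are made up for illustration, only the ten-slot modulo wrap is taken from the diff.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define CACHE_DEPTH 10			/* same wrap depth as the driver's "%= 10" */

struct fltr_req { int rule_id; };	/* stand-in for the QMI request struct */

static struct fltr_req cache[CACHE_DEPTH];
static int cache_idx;
static int ctx_valid = 1;		/* models "ipa3_qmi_ctx != NULL" */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void cache_request(const struct fltr_req *req)
{
	pthread_mutex_lock(&cache_lock);
	if (ctx_valid) {
		memcpy(&cache[cache_idx], req, sizeof(*req));
		cache_idx = (cache_idx + 1) % CACHE_DEPTH;	/* oldest entry overwritten */
	}
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	struct fltr_req req;
	int i;

	for (i = 0; i < 13; i++) {	/* 13 > CACHE_DEPTH, so slots 0-2 get reused */
		req.rule_id = i;
		cache_request(&req);
	}
	printf("slot 0 now holds rule %d\n", cache[0].rule_id);	/* prints 10 */
	return 0;
}
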
@@ -1339,3 +1356,13 @@
 		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
 }
 
+void ipa3_qmi_init(void)
+{
+	mutex_init(&ipa3_qmi_lock);
+}
+
+void ipa3_qmi_cleanup(void)
+{
+	mutex_destroy(&ipa3_qmi_lock);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 4fde261..6cd82f8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -204,6 +204,10 @@
 
 void ipa3_q6_handshake_complete(bool ssr_bootup);
 
+void ipa3_qmi_init(void);
+
+void ipa3_qmi_cleanup(void);
+
 #else /* CONFIG_RMNET_IPA3 */
 
 static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
@@ -316,6 +320,14 @@
 
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
+static inline void ipa3_qmi_init(void)
+{
+}
+
+static inline void ipa3_qmi_cleanup(void)
+{
+}
+
 #endif /* CONFIG_RMNET_IPA3 */
 
 #endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5a38db3..6321ca9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1703,10 +1703,11 @@
 const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	(enum ipa_client_type client)
 {
-	if (client >= IPA_CLIENT_MAX || client < 0) {
-		IPAERR("Bad client number! client =%d\n", client);
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(client);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED)
 		return NULL;
-	}
 
 	return &(ipa3_ep_mapping[ipa3_get_hw_type_index()]
 		[client].ipa_gsi_ep_info);
@@ -4352,21 +4353,30 @@
 
 	memset(&mem, 0, sizeof(mem));
 
-	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
-		IPADBG("Calling gsi_stop_channel\n");
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
 		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		IPADBG("gsi_stop_channel returned %d\n", res);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
+		goto end_sequence;
+	}
+
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
 		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
 			goto end_sequence;
 
-		if (IPA_CLIENT_IS_CONS(ep->client)) {
-			IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
-			/* Send a 1B packet DMA_TASK to IPA and try again */
-			res = ipa3_inject_dma_task_for_gsi();
-			if (res) {
-				IPAERR("Failed to inject DMA TASk for GSI\n");
-				goto end_sequence;
-			}
+		IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+		/* Send a 1B packet DMA_TASK to IPA and try again */
+		res = ipa3_inject_dma_task_for_gsi();
+		if (res) {
+			IPAERR("Failed to inject DMA TASk for GSI\n");
+			goto end_sequence;
 		}
 
 		/* sleep for short period to flush IPA */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 78fd90b..d369e82 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -649,6 +649,21 @@
 		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
 }
 
+static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	ep_ctrl->ipa_ep_suspend =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >>
+			IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT);
+
+	ep_ctrl->ipa_ep_delay =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >>
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
+}
+
 static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1059,7 +1074,8 @@
 		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
 		0x0000080C, 0x70},
 	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
-		ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+		ipareg_construct_endp_init_ctrl_n,
+		ipareg_parse_endp_init_ctrl_n,
 		0x00000800, 0x70},
 	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = {
 		ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
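
The new ipareg_parse_endp_init_ctrl_n above is the inverse of the existing construct helper: it extracts the suspend and delay fields from the register value using the same BMSK/SHFT pairs. The diff does not show the actual mask values, so the round-trip sketch below assumes suspend in bit 0 and delay in bit 1 purely for illustration; the real values live in the ipahal register headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, for illustration only. */
#define ENDP_SUSPEND_SHFT	0
#define ENDP_SUSPEND_BMSK	(1u << ENDP_SUSPEND_SHFT)
#define ENDP_DELAY_SHFT		1
#define ENDP_DELAY_BMSK		(1u << ENDP_DELAY_SHFT)

struct ep_cfg_ctrl { int ipa_ep_suspend; int ipa_ep_delay; };

static uint32_t construct(const struct ep_cfg_ctrl *cfg)
{
	uint32_t val = 0;

	val |= ((uint32_t)cfg->ipa_ep_suspend << ENDP_SUSPEND_SHFT) & ENDP_SUSPEND_BMSK;
	val |= ((uint32_t)cfg->ipa_ep_delay << ENDP_DELAY_SHFT) & ENDP_DELAY_BMSK;
	return val;
}

static void parse(struct ep_cfg_ctrl *cfg, uint32_t val)
{
	cfg->ipa_ep_suspend = (val & ENDP_SUSPEND_BMSK) >> ENDP_SUSPEND_SHFT;
	cfg->ipa_ep_delay = (val & ENDP_DELAY_BMSK) >> ENDP_DELAY_SHFT;
}

int main(void)
{
	struct ep_cfg_ctrl in = { .ipa_ep_suspend = 1, .ipa_ep_delay = 0 }, out;

	parse(&out, construct(&in));		/* round trip through the register value */
	assert(out.ipa_ep_suspend == 1 && out.ipa_ep_delay == 0);
	printf("round trip ok\n");
	return 0;
}
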
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 56e7718..a15bd04 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3206,6 +3206,9 @@
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
+
+	ipa3_qmi_init();
+
 	/* Register for Modem SSR */
 	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
 			SUBSYS_MODEM,
@@ -3219,7 +3222,7 @@
 static void __exit ipa3_wwan_cleanup(void)
 {
 	int ret;
-
+	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 47da1b3..4e9bd64 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -36,7 +36,7 @@
 #define WIGIG_VENDOR (0x1ae9)
 #define WIGIG_DEVICE (0x0310)
 
-#define SMMU_BASE	0x10000000 /* Device address range base */
+#define SMMU_BASE	0x20000000 /* Device address range base */
 #define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
 
 #define WIGIG_ENABLE_DELAY	50
@@ -93,9 +93,12 @@
 
 	/* SMMU */
 	bool use_smmu; /* have SMMU enabled? */
-	int smmu_bypass;
+	int smmu_s1_en;
 	int smmu_fast_map;
+	int smmu_coherent;
 	struct dma_iommu_mapping *mapping;
+	u32 smmu_base;
+	u32 smmu_size;
 
 	/* bus frequency scaling */
 	struct msm_bus_scale_pdata *bus_scale;
@@ -638,15 +641,20 @@
 {
 	int atomic_ctx = 1;
 	int rc;
+	int force_pt_coherent = 1;
+	int smmu_bypass = !ctx->smmu_s1_en;
+	dma_addr_t iova_base = 0;
+	dma_addr_t iova_end = ctx->smmu_base + ctx->smmu_size - 1;
+	struct iommu_domain_geometry geometry;
 
 	if (!ctx->use_smmu)
 		return 0;
 
-	dev_info(ctx->dev, "Initialize SMMU, bypass = %d, fastmap = %d\n",
-		 ctx->smmu_bypass, ctx->smmu_fast_map);
+	dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
+		 smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
 
 	ctx->mapping = arm_iommu_create_mapping(&platform_bus_type,
-						SMMU_BASE, SMMU_SIZE);
+						ctx->smmu_base, ctx->smmu_size);
 	if (IS_ERR_OR_NULL(ctx->mapping)) {
 		rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
 		dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
@@ -662,23 +670,50 @@
 		goto release_mapping;
 	}
 
-	if (ctx->smmu_bypass) {
+	if (smmu_bypass) {
 		rc = iommu_domain_set_attr(ctx->mapping->domain,
 					   DOMAIN_ATTR_S1_BYPASS,
-					   &ctx->smmu_bypass);
+					   &smmu_bypass);
 		if (rc) {
 			dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
 				rc);
 			goto release_mapping;
 		}
-	} else if (ctx->smmu_fast_map) {
-		rc = iommu_domain_set_attr(ctx->mapping->domain,
-					   DOMAIN_ATTR_FAST,
-					   &ctx->smmu_fast_map);
-		if (rc) {
-			dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
-				rc);
-			goto release_mapping;
+	} else {
+		/* Set dma-coherent and page table coherency */
+		if (ctx->smmu_coherent) {
+			arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+				   DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+				   &force_pt_coherent);
+			if (rc) {
+				dev_err(ctx->dev,
+					"Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
+		}
+
+		if (ctx->smmu_fast_map) {
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+						   DOMAIN_ATTR_FAST,
+						   &ctx->smmu_fast_map);
+			if (rc) {
+				dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
+			memset(&geometry, 0, sizeof(geometry));
+			geometry.aperture_start = iova_base;
+			geometry.aperture_end = iova_end;
+			rc = iommu_domain_set_attr(ctx->mapping->domain,
+						   DOMAIN_ATTR_GEOMETRY,
+						   &geometry);
+			if (rc) {
+				dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
+					rc);
+				goto release_mapping;
+			}
 		}
 	}
 
@@ -729,6 +764,25 @@
 	return rc;
 }
 
+static int msm_11ad_ssr_copy_ramdump(struct msm11ad_ctx *ctx)
+{
+	if (ctx->rops.ramdump && ctx->wil_handle) {
+		int rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
+					   WIGIG_RAMDUMP_SIZE);
+		if (rc) {
+			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	ctx->dump_data.version = WIGIG_DUMP_FORMAT_VER;
+	strlcpy(ctx->dump_data.name, WIGIG_SUBSYS_NAME,
+		sizeof(ctx->dump_data.name));
+
+	ctx->dump_data.magic = WIGIG_DUMP_MAGIC_VER_V1;
+	return 0;
+}
+
 static int msm_11ad_ssr_ramdump(int enable, const struct subsys_desc *subsys)
 {
 	int rc;
@@ -745,13 +799,10 @@
 	if (!enable)
 		return 0;
 
-	if (ctx->rops.ramdump && ctx->wil_handle) {
-		rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
-				       WIGIG_RAMDUMP_SIZE);
-		if (rc) {
-			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
-			return -EINVAL;
-		}
+	if (!ctx->recovery_in_progress) {
+		rc = msm_11ad_ssr_copy_ramdump(ctx);
+		if (rc)
+			return rc;
 	}
 
 	memset(&segment, 0, sizeof(segment));
@@ -763,7 +814,6 @@
 
 static void msm_11ad_ssr_crash_shutdown(const struct subsys_desc *subsys)
 {
-	int rc;
 	struct platform_device *pdev;
 	struct msm11ad_ctx *ctx;
 
@@ -775,19 +825,8 @@
 		return;
 	}
 
-	if (ctx->rops.ramdump && ctx->wil_handle) {
-		rc = ctx->rops.ramdump(ctx->wil_handle, ctx->ramdump_addr,
-				       WIGIG_RAMDUMP_SIZE);
-		if (rc)
-			dev_err(ctx->dev, "ramdump failed : %d\n", rc);
-		/* continue */
-	}
-
-	ctx->dump_data.version = WIGIG_DUMP_FORMAT_VER;
-	strlcpy(ctx->dump_data.name, WIGIG_SUBSYS_NAME,
-		sizeof(ctx->dump_data.name));
-
-	ctx->dump_data.magic = WIGIG_DUMP_MAGIC_VER_V1;
+	if (!ctx->recovery_in_progress)
+		(void)msm_11ad_ssr_copy_ramdump(ctx);
 }
 
 static void msm_11ad_ssr_deinit(struct msm11ad_ctx *ctx)
@@ -866,7 +905,7 @@
 static void msm_11ad_init_cpu_boost(struct msm11ad_ctx *ctx)
 {
 	unsigned int minfreq = 0, maxfreq = 0, freq;
-	int i, boost_cpu;
+	int i, boost_cpu = 0;
 
 	for_each_possible_cpu(i) {
 		freq = cpufreq_quick_get_max(i);
@@ -900,6 +939,7 @@
 	struct device_node *of_node = dev->of_node;
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
+	u32 smmu_mapping[2];
 	int rc;
 	u32 val;
 
@@ -954,8 +994,27 @@
 	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
 
-	ctx->smmu_bypass = 1;
-	ctx->smmu_fast_map = 0;
+	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
+	if (ctx->smmu_s1_en) {
+		ctx->smmu_fast_map = of_property_read_bool(
+						of_node, "qcom,smmu-fast-map");
+		ctx->smmu_coherent = of_property_read_bool(
+						of_node, "qcom,smmu-coherent");
+	}
+	rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
+			smmu_mapping, 2);
+	if (rc) {
+		dev_err(ctx->dev,
+			"Failed to read base/size smmu addresses %d, fallback to default\n",
+			rc);
+		ctx->smmu_base = SMMU_BASE;
+		ctx->smmu_size = SMMU_SIZE;
+	} else {
+		ctx->smmu_base = smmu_mapping[0];
+		ctx->smmu_size = smmu_mapping[1];
+	}
+	dev_dbg(ctx->dev, "smmu_base=0x%x smmu_size=0x%x\n",
+		ctx->smmu_base, ctx->smmu_size);
 
 	/*== execute ==*/
 	/* turn device on */
@@ -1266,6 +1325,7 @@
 
 	if (ctx->subsys) {
 		dev_info(ctx->dev, "SSR requested\n");
+		(void)msm_11ad_ssr_copy_ramdump(ctx);
 		ctx->recovery_in_progress = true;
 		rc = subsystem_restart_dev(ctx->subsys);
 		if (rc) {
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 79ea712..362375f 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -20,6 +20,16 @@
 	  The driver reports the charger status via the power supply framework.
 	  A charger status change triggers an IRQ via the device STAT pin.
 
+config SMB1355_SLAVE_CHARGER
+	tristate "SMB1355 Slave Battery Charger"
+	depends on MFD_I2C_PMIC
+	help
+	  Say Y to include support for SMB1355 Battery Charger.
+	  SMB1355 is a single phase 5A battery charger.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
 config SMB1351_USB_CHARGER
 	tristate "smb1351 usb charger (with VBUS detection)"
 	depends on I2C
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 171444f..bc19b24 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_QPNP_FG_GEN3)     += qpnp-fg-gen3.o fg-memif.o fg-util.o
 obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
+obj-$(CONFIG_SMB1355_SLAVE_CHARGER)   += smb1355-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
 obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 806460f..27047b4 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3366,6 +3366,16 @@
 		return rc;
 	}
 
+	if (is_debug_batt_id(chip)) {
+		val = ESR_NO_PULL_DOWN;
+		rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+			ESR_PULL_DOWN_MODE_MASK, val);
+		if (rc < 0) {
+			pr_err("Error in writing esr_pull_down, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e802fbd..f8a7555 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -858,7 +858,6 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
 	POWER_SUPPLY_PROP_CHARGER_TEMP,
 	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
@@ -878,6 +877,8 @@
 	POWER_SUPPLY_PROP_DIE_HEALTH,
 	POWER_SUPPLY_PROP_RERUN_AICL,
 	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -907,9 +908,12 @@
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = smblib_get_prop_batt_capacity(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_get_prop_system_temp_level(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		rc = smblib_get_prop_system_temp_level_max(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
 		/* do not query RRADC if charger is not present */
 		rc = smblib_get_prop_usb_present(chg, &pval);
@@ -1004,7 +1008,7 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smblib_set_prop_input_suspend(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_set_prop_system_temp_level(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 7d5a8bd..f4ae415 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1635,6 +1635,13 @@
 	return 0;
 }
 
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->thermal_levels;
+	return 0;
+}
+
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val)
 {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b0d84f0..5409166 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -398,6 +398,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
new file mode 100644
index 0000000..d5fff74
--- /dev/null
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -0,0 +1,675 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "SMB1355: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
+
+#define SMB1355_DEFAULT_FCC_UA 1000000
+
+/* SMB1355 registers, different than mentioned in smb-reg.h */
+
+#define CHGR_BASE	0x1000
+#define BATIF_BASE	0x1200
+#define USBIN_BASE	0x1300
+#define MISC_BASE	0x1600
+
+#define BATTERY_STATUS_2_REG			(CHGR_BASE + 0x0B)
+#define DISABLE_CHARGING_BIT			BIT(3)
+
+#define BATTERY_STATUS_3_REG			(CHGR_BASE + 0x0C)
+#define BATT_GT_PRE_TO_FAST_BIT			BIT(4)
+#define ENABLE_CHARGING_BIT			BIT(3)
+
+#define CHGR_CFG2_REG				(CHGR_BASE + 0x51)
+#define CHG_EN_SRC_BIT				BIT(7)
+#define CHG_EN_POLARITY_BIT			BIT(6)
+
+#define CFG_REG					(CHGR_BASE + 0x53)
+#define CHG_OPTION_PIN_TRIM_BIT			BIT(7)
+#define BATN_SNS_CFG_BIT			BIT(4)
+#define CFG_TAPER_DIS_AFVC_BIT			BIT(3)
+#define BATFET_SHUTDOWN_CFG_BIT			BIT(2)
+#define VDISCHG_EN_CFG_BIT			BIT(1)
+#define VCHG_EN_CFG_BIT				BIT(0)
+
+#define FAST_CHARGE_CURRENT_CFG_REG		(CHGR_BASE + 0x61)
+#define FAST_CHARGE_CURRENT_SETTING_MASK	GENMASK(7, 0)
+
+#define CHGR_BATTOV_CFG_REG			(CHGR_BASE + 0x70)
+#define BATTOV_SETTING_MASK			GENMASK(7, 0)
+
+#define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT			BIT(0)
+
+#define WD_CFG_REG				(MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
+#define BARK_WDOG_INT_EN_BIT			BIT(6)
+#define BITE_WDOG_INT_EN_BIT			BIT(5)
+#define WDOG_IRQ_SFT_BIT			BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
+
+struct smb_chg_param {
+	const char	*name;
+	u16		reg;
+	int		min_u;
+	int		max_u;
+	int		step_u;
+};
+
+struct smb_params {
+	struct smb_chg_param	fcc;
+	struct smb_chg_param	ov;
+};
+
+static struct smb_params v1_params = {
+	.fcc		= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.ov		= {
+		.name	= "battery over voltage",
+		.reg	= CHGR_BATTOV_CFG_REG,
+		.min_u	= 2450000,
+		.max_u	= 5000000,
+		.step_u	= 10000,
+	},
+};
+
+struct smb_irq_info {
+	const char		*name;
+	const irq_handler_t	handler;
+	const bool		wake;
+	int			irq;
+};
+
+struct smb1355 {
+	struct device		*dev;
+	char			*name;
+	struct regmap		*regmap;
+
+	struct smb_params	param;
+
+	struct mutex		write_lock;
+
+	struct power_supply	*parallel_psy;
+	struct pmic_revid_data	*pmic_rev_id;
+};
+
+static bool is_secure(struct smb1355 *chip, int addr)
+{
+	/* assume everything at or above 0xA0 is secure */
+	return (addr & 0xFF) >= 0xA0;
+}
+
+static int smb1355_read(struct smb1355 *chip, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc;
+
+	rc = regmap_read(chip->regmap, addr, &temp);
+	if (rc >= 0)
+		*val = (u8)temp;
+
+	return rc;
+}
+
+static int smb1355_masked_write(struct smb1355 *chip, u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int smb1355_write(struct smb1355 *chip, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->write_lock);
+
+	if (is_secure(chip, addr)) {
+		rc = regmap_write(chip->regmap, (addr & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write(chip->regmap, addr, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
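+
+/*
+ * Note on the two write helpers above: both first write the 0xA5 unlock value
+ * to the peripheral's secure-access register at offset 0xD0 before touching
+ * addresses flagged by is_secure(). smb1355_masked_write() forms that register
+ * as (addr & 0xFF00) | 0xD0 while smb1355_write() uses (addr & ~(0xFF)) | 0xD0;
+ * for 16-bit register addresses the two expressions are equivalent. The small
+ * standalone check below demonstrates that; the 0x16A5 sample address is
+ * hypothetical, only the 0xA5 value and 0xD0 offset come from this file.
+ *
+ *	#include <assert.h>
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *
+ *	#define SEC_ACCESS_OFFSET	0xD0	// per-peripheral unlock register
+ *	#define SEC_ACCESS_UNLOCK	0xA5	// value written before a secure write
+ *
+ *	int main(void)
+ *	{
+ *		uint16_t addr = 0x16A5;		// hypothetical secure MISC register
+ *		uint16_t a = (addr & 0xFF00) | SEC_ACCESS_OFFSET;
+ *		uint16_t b = (uint16_t)((addr & ~0xFF) | SEC_ACCESS_OFFSET);
+ *
+ *		assert(a == b);			// both select the peripheral's 0xD0 register
+ *		printf("unlock reg = 0x%04X, value = 0x%02X\n", a, SEC_ACCESS_UNLOCK);
+ *		return 0;
+ *	}
+ */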
+
+static int smb1355_set_charge_param(struct smb1355 *chip,
+			struct smb_chg_param *param, int val_u)
+{
+	int rc;
+	u8 val_raw;
+
+	if (val_u > param->max_u || val_u < param->min_u) {
+		pr_err("%s: %d is out of range [%d, %d]\n",
+			param->name, val_u, param->min_u, param->max_u);
+		return -EINVAL;
+	}
+
+	val_raw = (val_u - param->min_u) / param->step_u;
+
+	rc = smb1355_write(chip, param->reg, val_raw);
+	if (rc < 0) {
+		pr_err("%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+			param->name, val_raw, param->reg, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb1355_get_charge_param(struct smb1355 *chip,
+			struct smb_chg_param *param, int *val_u)
+{
+	int rc;
+	u8 val_raw;
+
+	rc = smb1355_read(chip, param->reg, &val_raw);
+	if (rc < 0) {
+		pr_err("%s: Couldn't read from 0x%04x rc=%d\n",
+			param->name, param->reg, rc);
+		return rc;
+	}
+
+	*val_u = val_raw * param->step_u + param->min_u;
+
+	return rc;
+}
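+
+/*
+ * The set/get helpers above map between microvolt/microamp values and the raw
+ * register code with the linear relation raw = (val - min) / step and
+ * val = raw * step + min. Worked example using the v1_params FCC values from
+ * this file (min 0, step 25000, max 6000000): the driver's default request of
+ * SMB1355_DEFAULT_FCC_UA = 1000000 uA encodes to raw (1000000 - 0) / 25000 =
+ * 40 = 0x28, and decoding 0x28 gives 40 * 25000 + 0 = 1000000 uA again. A
+ * standalone sketch of the same arithmetic:
+ *
+ *	#include <stdio.h>
+ *
+ *	struct chg_param { int min_u, max_u, step_u; };
+ *	static const struct chg_param fcc = { 0, 6000000, 25000 };
+ *
+ *	static int to_raw(const struct chg_param *p, int val_u)
+ *	{
+ *		if (val_u < p->min_u || val_u > p->max_u)
+ *			return -1;			// out of range, like -EINVAL
+ *		return (val_u - p->min_u) / p->step_u;
+ *	}
+ *
+ *	static int from_raw(const struct chg_param *p, int raw)
+ *	{
+ *		return raw * p->step_u + p->min_u;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		int raw = to_raw(&fcc, 1000000);	// SMB1355_DEFAULT_FCC_UA
+ *
+ *		printf("1000000 uA -> raw 0x%02X -> %d uA\n", raw, from_raw(&fcc, raw));
+ *		return 0;
+ *	}
+ */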
+
+static irqreturn_t smb1355_handle_chg_state_change(int irq, void *data)
+{
+	struct smb1355 *chip = data;
+
+	if (chip->parallel_psy)
+		power_supply_changed(chip->parallel_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t smb1355_handle_wdog_bark(int irq, void *data)
+{
+	struct smb1355 *chip = data;
+	int rc;
+
+	rc = smb1355_write(chip, BARK_BITE_WDOG_PET_REG,
+					BARK_BITE_WDOG_PET_BIT);
+	if (rc < 0)
+		pr_err("Couldn't pet the dog rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+/*****************************
+ * PARALLEL PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb1355_parallel_props[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't read SMB1355_BATTERY_STATUS_3 rc=%d\n", rc);
+		return rc;
+	}
+
+	if (stat & ENABLE_CHARGING_BIT) {
+		if (stat & BATT_GT_PRE_TO_FAST_BIT)
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+		else
+			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	} else {
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	return rc;
+}
+
+static int smb1355_get_parallel_charging(struct smb1355 *chip, int *disabled)
+{
+	int rc;
+	u8 cfg2;
+
+	rc = smb1355_read(chip, CHGR_CFG2_REG, &cfg2);
+	if (rc < 0) {
+		pr_err("Couldn't read en_cmg_reg rc=%d\n", rc);
+		return rc;
+	}
+
+	if (cfg2 & CHG_EN_SRC_BIT)
+		*disabled = 0;
+	else
+		*disabled = 1;
+
+	return 0;
+}
+
+static int smb1355_parallel_get_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb1355 *chip = power_supply_get_drvdata(psy);
+	u8 stat;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smb1355_get_prop_batt_charge_type(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = smb1355_read(chip, BATTERY_STATUS_3_REG, &stat);
+		if (rc >= 0)
+			val->intval = (bool)(stat & ENABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_PIN_ENABLED:
+		rc = smb1355_read(chip, BATTERY_STATUS_2_REG, &stat);
+		if (rc >= 0)
+			val->intval = !(stat & DISABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1355_get_parallel_charging(chip, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smb1355_get_charge_param(chip, &chip->param.ov,
+						&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smb1355_get_charge_param(chip, &chip->param.fcc,
+						&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = chip->name;
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		break;
+	default:
+		pr_err_ratelimited("parallel psy get prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb1355_set_parallel_charging(struct smb1355 *chip, bool disable)
+{
+	int rc;
+
+	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+				 disable ? 0 : WDOG_TIMER_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't %s watchdog rc=%d\n",
+		       disable ? "disable" : "enable", rc);
+		disable = true;
+	}
+
+	/*
+	 * Configure charge enable for high polarity. When disabling
+	 * charging, set it to command register control (cmd bit = 0);
+	 * when enabling charging, set it to pin control.
+	 */
+	rc = smb1355_masked_write(chip, CHGR_CFG2_REG,
+			CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT,
+			disable ? 0 : CHG_EN_SRC_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb1355_parallel_set_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     const union power_supply_propval *val)
+{
+	struct smb1355 *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1355_set_parallel_charging(chip, (bool)val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smb1355_set_charge_param(chip, &chip->param.ov,
+						val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smb1355_set_charge_param(chip, &chip->param.fcc,
+						val->intval);
+		break;
+	default:
+		pr_debug("parallel power supply set prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb1355_parallel_prop_is_writeable(struct power_supply *psy,
+					      enum power_supply_property prop)
+{
+	return 0;
+}
+
+static struct power_supply_desc parallel_psy_desc = {
+	.name			= "parallel",
+	.type			= POWER_SUPPLY_TYPE_PARALLEL,
+	.properties		= smb1355_parallel_props,
+	.num_properties		= ARRAY_SIZE(smb1355_parallel_props),
+	.get_property		= smb1355_parallel_get_prop,
+	.set_property		= smb1355_parallel_set_prop,
+	.property_is_writeable	= smb1355_parallel_prop_is_writeable,
+};
+
+static int smb1355_init_parallel_psy(struct smb1355 *chip)
+{
+	struct power_supply_config parallel_cfg = {};
+
+	parallel_cfg.drv_data = chip;
+	parallel_cfg.of_node = chip->dev->of_node;
+
+	/* change to smb1355's property list */
+	parallel_psy_desc.properties = smb1355_parallel_props;
+	parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+						   &parallel_psy_desc,
+						   &parallel_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel power supply\n");
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	return 0;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+
+static int smb1355_init_hw(struct smb1355 *chip)
+{
+	int rc;
+
+	/* enable watchdog bark and bite interrupts, and disable the watchdog */
+	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT
+			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+			| BARK_WDOG_INT_EN_BIT,
+			BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable charging when watchdog bites */
+	rc = smb1355_masked_write(chip, SNARL_BARK_BITE_WD_CFG_REG,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable parallel charging path */
+	rc = smb1355_set_parallel_charging(chip, true);
+	if (rc < 0) {
+		pr_err("Couldn't disable parallel path rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize FCC to 0 */
+	rc = smb1355_set_charge_param(chip, &chip->param.fcc, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set 0 FCC rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable parallel current sensing */
+	rc = smb1355_masked_write(chip, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+static struct smb_irq_info smb1355_irqs[] = {
+	[0] = {
+		.name		= "wdog-bark",
+		.handler	= smb1355_handle_wdog_bark,
+	},
+	[1] = {
+		.name		= "chg-state-change",
+		.handler	= smb1355_handle_chg_state_change,
+		.wake		= true,
+	},
+};
+
+static int smb1355_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb1355_irqs); i++) {
+		if (strcmp(smb1355_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb1355_request_interrupt(struct smb1355 *chip,
+				struct device_node *node,
+				const char *irq_name)
+{
+	int rc = 0, irq, irq_index;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb1355_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb1355_irqs[irq_index].handler)
+		return 0;
+
+	rc = devm_request_threaded_irq(chip->dev, irq, NULL,
+				smb1355_irqs[irq_index].handler,
+				IRQF_ONESHOT, irq_name, chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
+		return rc;
+	}
+
+	if (smb1355_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb1355_request_interrupts(struct smb1355 *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					prop, name) {
+			rc = smb1355_request_interrupt(chip, child, name);
+			if (rc < 0) {
+				pr_err("Couldn't request interrupt %s rc=%d\n",
+					name, rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*********
+ * PROBE *
+ *********/
+static const struct of_device_id match_table[] = {
+	{
+		.compatible	= "qcom,smb1355",
+	},
+	{ },
+};
+
+static int smb1355_probe(struct platform_device *pdev)
+{
+	struct smb1355 *chip;
+	const struct of_device_id *id;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->param = v1_params;
+	chip->name = "smb1355";
+	mutex_init(&chip->write_lock);
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_device(of_match_ptr(match_table), chip->dev);
+	if (!id) {
+		pr_err("Couldn't find a matching device\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb1355_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb1355_init_parallel_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb1355_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	pr_info("%s probed successfully\n", chip->name);
+	return rc;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb1355_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver smb1355_driver = {
+	.driver	= {
+		.name		= "qcom,smb1355-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe	= smb1355_probe,
+	.remove	= smb1355_remove,
+};
+module_platform_driver(smb1355_driver);
+
+MODULE_DESCRIPTION("QPNP SMB1355 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 9566e24..b1e6a3b 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -177,6 +177,7 @@
 #define CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN	BIT(27)
 
 #define CPR4_REG_MISC				0x700
+#define CPR4_MISC_RESET_STEP_QUOT_LOOP_EN	BIT(2)
 #define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK	GENMASK(23, 20)
 #define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT	20
 #define CPR4_MISC_TEMP_SENSOR_ID_START_MASK	GENMASK(27, 24)
@@ -289,6 +290,10 @@
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK	BIT(16)
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN	BIT(16)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK	BIT(13)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN	BIT(13)
 #define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
 #define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
 
@@ -723,6 +728,11 @@
 	int thread_id = 0;
 	u64 temp;
 
+	if (ctrl->reset_step_quot_loop_en)
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
 	if (ctrl->supports_hw_closed_loop) {
 		if (ctrl->saw_use_unit_mV)
 			pmic_step_size = ctrl->step_volt / 1000;
@@ -1355,6 +1365,11 @@
 		}
 	}
 
+	if (ctrl->reset_step_quot_loop_en)
+		cpr3_masked_write(ctrl, CPR4_REG_MISC,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+				CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
 	if (ctrl->saw_use_unit_mV)
 		pmic_step_size = ctrl->step_volt / 1000;
 	cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
@@ -1438,6 +1453,16 @@
 				  ctrl->acd_adj_down_step_size <<
 				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
 		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK,
+				  (ctrl->acd_notwait_for_cl_settled
+				   ? CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK,
+				  (ctrl->acd_adj_avg_fast_update
+				   ? CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
 				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
 				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
 	}
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 570ddfc..a315e46 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -661,6 +661,10 @@
  * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
  *			target quotient adjustment due to an ACD up
  *			recommendation.
+ * @acd_notwait_for_cl_settled: Boolean which indicates ACD down recommendations
+ *			do not need to wait for CPR closed-loop to settle.
+ * @acd_adj_avg_fast_update: Boolean which indicates if CPR should issue
+ *			immediate voltage updates on ACD requests.
  * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
  *			feature.
  * @count_mode:		CPR controller count mode
@@ -756,6 +760,12 @@
  * @panic_notifier:	Notifier block registered to global panic notifier list.
  * @support_ldo300_vreg: Boolean value which indicates that this CPR controller
  *			manages an underlying LDO regulator of type LDO300.
+ * @reset_step_quot_loop_en: Boolean value which indicates that this CPR
+ *			controller should be configured to reset step_quot on
+ *			each loop_en = 0 transition. This configuration allows
+ *			the CPR controller to first use the default step_quot
+ *			and then later switch to the run-time calibrated
+ *			step_quot.
  *
  * This structure contains both configuration and runtime state data.  The
  * elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled,
@@ -822,6 +832,8 @@
 	u32			acd_adj_up_step_limit;
 	u32			acd_adj_down_step_size;
 	u32			acd_adj_up_step_size;
+	bool			acd_notwait_for_cl_settled;
+	bool			acd_adj_avg_fast_update;
 	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
@@ -866,6 +878,7 @@
 	struct cpr3_panic_regs_info *panic_regs_info;
 	struct notifier_block	panic_notifier;
 	bool			support_ldo300_vreg;
+	bool			reset_step_quot_loop_en;
 };
 
 /* Used for rounding voltages to the closest physically available set point. */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 648d396..3035155 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1224,6 +1224,14 @@
 	}
 
 	/*
+	 * Resetting step_quot to its default on each loop_en = 0 transition
+	 * is optional.
+	 */
+	ctrl->reset_step_quot_loop_en
+		= of_property_read_bool(ctrl->dev->of_node,
+					"qcom,cpr-reset-step-quot-loop-en");
+
+	/*
 	 * Regulator device handles are not necessary for CPRh controllers
 	 * since communication with the regulators is completely managed
 	 * in hardware.
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index b84d9f0..cf7c35d 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -34,9 +34,16 @@
 
 #include "cpr3-regulator.h"
 
-#define MSM8998_KBSS_FUSE_CORNERS	4
-#define SDM660_KBSS_FUSE_CORNERS	5
-#define SDM845_KBSS_FUSE_CORNERS	3
+#define MSM8998_KBSS_FUSE_CORNERS			4
+#define SDM660_KBSS_FUSE_CORNERS			5
+
+#define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS		4
+#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS	3
+/*
+ * This must be set to the larger of SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS and
+ * SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS values.
+ */
+#define SDM845_KBSS_MAX_FUSE_CORNERS			4
 
 /**
  * struct cprh_kbss_fuses - KBSS specific fuse data
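
The comment above requires SDM845_KBSS_MAX_FUSE_CORNERS to stay in sync with the larger of the two per-cluster counts, because the SDM845 fuse-parameter tables below are now sized by the maximum while each cluster only consumes its own count. A compile-time guard of the kind sketched here can catch a mismatch if either count changes; it uses C11 _Static_assert for a standalone build (kernel code would more likely use BUILD_BUG_ON) and is an illustration, not part of the patch.

#include <stdio.h>

#define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS		4
#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS	3
#define SDM845_KBSS_MAX_FUSE_CORNERS			4

/* Fails to compile if the max no longer covers both clusters. */
_Static_assert(SDM845_KBSS_MAX_FUSE_CORNERS >=
			SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS &&
	       SDM845_KBSS_MAX_FUSE_CORNERS >=
			SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS,
	       "SDM845_KBSS_MAX_FUSE_CORNERS is too small");

int main(void)
{
	printf("max fuse corners: %d\n", SDM845_KBSS_MAX_FUSE_CORNERS);
	return 0;
}
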
@@ -79,7 +86,7 @@
  */
 #define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT	32
 #define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT	16
-#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	8
+#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	16
 
 /*
  * Constants which define the name of each fuse corner.
@@ -146,16 +153,18 @@
 #define CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID	0
 
 static const char * const
-cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_FUSE_CORNERS] = {
+cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		"LowSVS",
 		"SVS_L1",
 		"NOM_L1",
+		"TURBO",
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
 		"SVS",
 		"NOM",
 		"TURBO_L2",
+		"",
 	},
 };
 
@@ -325,17 +334,19 @@
  *		 different fuse rows.
  */
 static const struct cpr3_fuse_param
-sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{66, 52, 55}, {} },
 			{{66, 48, 51}, {} },
 			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			{{66, 52, 55}, {} },
 			{{66, 48, 51}, {} },
 			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -348,17 +359,19 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 10, 15}, {} },
 			{{67,  4,  9}, {} },
 			{{66, 62, 63}, {67,  0,  3}, {} },
+			{{66, 56, 61}, {} },
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			{{68, 47, 52}, {} },
 			{{68, 41, 46}, {} },
 			{{68, 35, 40}, {} },
+			{{68, 29, 34}, {} },
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -371,17 +384,19 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 52, 63}, {} },
 			{{67, 40, 51}, {} },
 			{{67, 28, 39}, {} },
+			{{67, 16, 27}, {} },
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			{{69, 25, 36}, {} },
 			{{69, 13, 24}, {} },
 			{{69,  1, 12}, {} },
+			{{68, 53, 63}, {69,  0,  0}, {} },
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -394,17 +409,19 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_FUSE_CORNERS][2] = {
+sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{} },
 			{{68, 14, 20}, {} },
 			{{68,  7, 13}, {} },
+			{{68,  0,  6}, {} },
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			{{} },
 			{{69, 51, 57}, {} },
 			{{69, 44, 50}, {} },
+			{{69, 37, 43}, {} },
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -539,17 +556,19 @@
  * Open loop voltage fuse reference voltages in microvolts for SDM845
  */
 static const int
-sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_FUSE_CORNERS] = {
+sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			688000,
 			812000,
 			896000,
+			900000,
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			688000,
 			812000,
 			896000,
+			900000,
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -976,7 +995,10 @@
 		break;
 	case SDM845_V1_SOC_ID:
 	case SDM845_V2_SOC_ID:
-		fuse_corners = SDM845_KBSS_FUSE_CORNERS;
+		fuse_corners = vreg->thread->ctrl->ctrl_id
+					== CPRH_KBSS_POWER_CLUSTER_ID
+				? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
+				: SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS;
 		break;
 	default:
 		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -2260,6 +2282,13 @@
 				 rc);
 			return rc;
 		}
+
+		ctrl->acd_notwait_for_cl_settled =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-notwait-for-cl-settled");
+		ctrl->acd_adj_avg_fast_update =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-fast-update");
 	}
 
 	rc = of_property_read_u32(ctrl->dev->of_node,
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index af17066..243b2d1 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -154,16 +154,6 @@
 	else
 		fn = NULL;
 
-	/*
-	 * Forcibly set runtime PM status of request queue to "active" to
-	 * make sure we can again get requests from the queue (see also
-	 * blk_pm_peek_request()).
-	 *
-	 * The resume hook will correct runtime PM status of the disk.
-	 */
-	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-		blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
 	if (fn) {
 		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3f218f5..c5ab1b0 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@
  */
 static int storvsc_timeout = 180;
 
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 static struct scsi_transport_template *fc_transport_template;
 #endif
@@ -1283,6 +1281,22 @@
 	return ret;
 }
 
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+	/*
+	 * Set blist flag to permit the reading of the VPD pages even when
+	 * the target may claim SPC-2 compliance. MSFT targets currently
+	 * claim SPC-2 compliance while they implement post SPC-2 features.
+	 * With this flag we can correctly handle WRITE_SAME_16 issues.
+	 *
+	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+	 * still supports REPORT LUN.
+	 */
+	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+	return 0;
+}
+
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
@@ -1298,14 +1312,6 @@
 	sdevice->no_write_same = 1;
 
 	/*
-	 * Add blist flags to permit the reading of the VPD pages even when
-	 * the target may claim SPC-2 compliance. MSFT targets currently
-	 * claim SPC-2 compliance while they implement post SPC-2 features.
-	 * With this patch we can correctly handle WRITE_SAME_16 issues.
-	 */
-	sdevice->sdev_bflags |= msft_blist_flags;
-
-	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
 	 * if the device is a MSFT virtual device.  If the host is
 	 * WIN10 or newer, allow write_same.
@@ -1569,6 +1575,7 @@
 	.eh_host_reset_handler =	storvsc_host_reset_handler,
 	.proc_name =		"storvsc_host",
 	.eh_timed_out =		storvsc_eh_timed_out,
+	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
 	.cmd_per_lun =		255,
 	.this_id =		-1,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 69e3032..3311380 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -655,3 +655,11 @@
 	  memory location. These messages provide statistical information about
 	  the low power modes that RPM enters. The drivers outputs the message
 	  via a debugfs node.
+
+config QCOM_FORCE_WDOG_BITE_ON_PANIC
+	bool "QCOM force watchdog bite"
+	depends on QCOM_WATCHDOG_V2
+	help
+	  This forces a watchdog bite when the device restarts due to a
+	  kernel panic. On certain MSM SoCs, this provides additional
+	  debugging information.
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 0c2ba4d..252bd21 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -19,6 +19,9 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
 #include <soc/qcom/cmd-db.h>
 
 #define RESOURCE_ID_LEN 8
@@ -239,20 +242,132 @@
 	return ret < 0 ? 0 : (ent.addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
 }
 
+static void *cmd_db_start(struct seq_file *m, loff_t *pos)
+{
+	struct cmd_db_header *hdr = m->private;
+	int slv_idx, ent_idx;
+	struct entry_header *ent;
+	int total = 0;
+
+	for (slv_idx = 0; slv_idx < MAX_SLV_ID; slv_idx++) {
+
+		if (!hdr->header[slv_idx].cnt)
+			continue;
+		ent_idx = *pos - total;
+		if (ent_idx < hdr->header[slv_idx].cnt)
+			break;
+
+		total += hdr->header[slv_idx].cnt;
+	}
+
+	if (slv_idx == MAX_SLV_ID)
+		return NULL;
+
+	ent = start_addr + hdr->header[slv_idx].header_offset + sizeof(*hdr);
+	return &ent[ent_idx];
+
+}
+
+static void cmd_db_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *cmd_db_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return cmd_db_start(m, pos);
+}
+
+static int cmd_db_seq_show(struct seq_file *m, void *v)
+{
+	struct entry_header *eh = v;
+	struct cmd_db_header *hdr = m->private;
+	char buf[9] = {0};
+
+	if (!eh)
+		return 0;
+
+	memcpy(buf, &eh->res_id, min(sizeof(eh->res_id), sizeof(buf)));
+
+	seq_printf(m, "Address: 0x%05x, id: %s", eh->addr, buf);
+
+	if (eh->len) {
+		int slv_id = (eh->addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+		u8 aux[32] = {0};
+		int len;
+		int k;
+
+		len = min_t(u32, eh->len, sizeof(aux));
+
+		for (k = 0; k < MAX_SLV_ID; k++) {
+			if (hdr->header[k].slv_id == slv_id)
+				break;
+		}
+
+		if (k == MAX_SLV_ID)
+			return -EINVAL;
+
+		memcpy_fromio(aux, start_addr + hdr->header[k].data_offset
+			+ eh->offset + sizeof(*cmd_db_header), len);
+
+		seq_puts(m, ", aux data: ");
+
+		for (k = 0; k < len; k++)
+			seq_printf(m, "%02x ", aux[k]);
+
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static const struct seq_operations cmd_db_seq_ops = {
+	.start = cmd_db_start,
+	.stop = cmd_db_stop,
+	.next = cmd_db_next,
+	.show = cmd_db_seq_show,
+};
+
+static int cmd_db_file_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &cmd_db_seq_ops);
+	struct seq_file *s = (struct seq_file *)(file->private_data);
+
+	s->private = inode->i_private;
+	return ret;
+}
+
+static const struct file_operations cmd_db_fops = {
+	.owner = THIS_MODULE,
+	.open = cmd_db_file_open,
+	.read = seq_read,
+	.release = seq_release,
+	.llseek = no_llseek,
+};
+
 static int cmd_db_dev_probe(struct platform_device *pdev)
 {
-	struct resource *res;
+	struct resource res;
+	void __iomem *dict;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
+	dict = of_iomap(pdev->dev.of_node, 0);
+	if (!dict) {
 		cmd_db_status = -ENOMEM;
 		goto failed;
 	}
 
-	start_addr = devm_ioremap_resource(&pdev->dev, res);
+	/*
+	 * Read the start address and size of the command DB from the
+	 * shared dictionary location.
+	 */
+	res.start = readl_relaxed(dict);
+	res.end = res.start + readl_relaxed(dict + 0x4);
+	res.flags = IORESOURCE_MEM;
+	iounmap(dict);
 
-	cmd_db_header = devm_kzalloc(&pdev->dev, sizeof(*cmd_db_header),
-			GFP_KERNEL);
+	start_addr = devm_ioremap_resource(&pdev->dev, &res);
+
+	cmd_db_header = devm_kzalloc(&pdev->dev,
+			sizeof(*cmd_db_header), GFP_KERNEL);
 
 	if (!cmd_db_header) {
 		cmd_db_status = -ENOMEM;
@@ -269,6 +384,10 @@
 	cmd_db_status = 0;
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 
+	if (!debugfs_create_file("cmd_db", 0444, NULL,
+				cmd_db_header, &cmd_db_fops))
+		pr_err("Couldn't create debugfs\n");
+
 	if (cmd_db_is_standalone() == 1)
 		pr_info("Command DB is initialized in standalone mode.\n");
 
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index c837bd8..9810207 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -700,6 +700,7 @@
  * edge:		The G-Link edge name for the channel associated with
  *			this callback data
  * do_cleanup_data:	Structure containing the G-Link SSR do_cleanup message.
+ * cb_kref:		Kref object to maintain cb_data reference.
  */
 struct ssr_notify_data {
 	bool tx_done;
@@ -707,6 +708,7 @@
 	bool responded;
 	const char *edge;
 	struct do_cleanup_msg *do_cleanup_data;
+	struct kref cb_kref;
 };
 
 /**
@@ -741,6 +743,7 @@
 	int notify_list_len;
 	bool link_up;
 	spinlock_t link_up_lock;
+	spinlock_t cb_lock;
 };
 
 /**
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index b24598a..4737288 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -115,6 +115,44 @@
 static atomic_t responses_remaining = ATOMIC_INIT(0);
 static wait_queue_head_t waitqueue;
 
+/**
+ * cb_data_release() - Free cb_data and set to NULL
+ * @kref_ptr:	pointer to kref.
+ *
+ * This function releases cb_data.
+ */
+static inline void cb_data_release(struct kref *kref_ptr)
+{
+	struct ssr_notify_data *cb_data;
+
+	cb_data = container_of(kref_ptr, struct ssr_notify_data, cb_kref);
+	kfree(cb_data);
+}
+
+/**
+ * check_and_get_cb_data() - Try to get reference to kref of cb_data
+ * @ss_info:	pointer to subsystem info structure.
+ *
+ * Return: NULL if cb_data is NULL, pointer to cb_data otherwise
+ */
+static struct ssr_notify_data *check_and_get_cb_data(
+					struct subsys_info *ss_info)
+{
+	struct ssr_notify_data *cb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
+	if (ss_info->cb_data == NULL) {
+		GLINK_SSR_LOG("<SSR> %s: cb_data is NULL\n", __func__);
+		spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+		return NULL;
+	}
+	kref_get(&ss_info->cb_data->cb_kref);
+	cb_data = ss_info->cb_data;
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+	return cb_data;
+}
+
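+/*
+ * check_and_get_cb_data() takes a reference under cb_lock so a concurrent
+ * channel-close cannot free cb_data while a notifier pass is still using it;
+ * every successful get is paired with a kref_put() later in the loop, and the
+ * close path drops the original reference instead of calling kfree() directly.
+ * A userspace analogue of that get/put/release discipline, using a plain
+ * counter and a pthread mutex in place of kref and the spinlock (names here
+ * are illustrative only):
+ *
+ *	#include <pthread.h>
+ *	#include <stdio.h>
+ *	#include <stdlib.h>
+ *
+ *	struct cb_data { int refs; int event; };
+ *
+ *	static struct cb_data *g_cb;			// models ss_info->cb_data
+ *	static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
+ *
+ *	// Drop one reference; free on the last put (what kref_put + release does).
+ *	static void cb_put(struct cb_data *cb)
+ *	{
+ *		pthread_mutex_lock(&g_lock);
+ *		if (--cb->refs == 0)
+ *			free(cb);
+ *		pthread_mutex_unlock(&g_lock);
+ *	}
+ *
+ *	// Take a reference only if the pointer is still published.
+ *	static struct cb_data *cb_get(void)
+ *	{
+ *		struct cb_data *cb;
+ *
+ *		pthread_mutex_lock(&g_lock);
+ *		cb = g_cb;
+ *		if (cb)
+ *			cb->refs++;
+ *		pthread_mutex_unlock(&g_lock);
+ *		return cb;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct cb_data *cb;
+ *
+ *		g_cb = calloc(1, sizeof(*g_cb));
+ *		if (!g_cb)
+ *			return 1;
+ *		g_cb->refs = 1;			// like kref_init() at open time
+ *
+ *		cb = cb_get();			// notifier takes a reference
+ *		g_cb = NULL;			// "close" unpublishes the pointer...
+ *		printf("still usable, event=%d\n", cb->event);
+ *		cb_put(cb);			// ...notifier drops its reference
+ *		cb_put(cb);			// owner's original reference, frees
+ *		return 0;
+ *	}
+ */
+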
 static void rx_done_cb_worker(struct work_struct *work)
 {
 	struct rx_done_ch_work *rx_done_work =
@@ -340,8 +378,10 @@
 
 	if (WARN_ON(!ss_info->cb_data))
 		return;
-	kfree(ss_info->cb_data);
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
+	kref_put(&ss_info->cb_data->cb_kref, cb_data_release);
 	ss_info->cb_data = NULL;
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
 	kfree(close_work);
 }
 
@@ -508,13 +548,18 @@
 			return -ENODEV;
 		}
 		handle = ss_info_channel->handle;
-		ss_leaf_entry->cb_data = ss_info_channel->cb_data;
+		ss_leaf_entry->cb_data = check_and_get_cb_data(
+							ss_info_channel);
+		if (!ss_leaf_entry->cb_data) {
+			GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+			atomic_dec(&responses_remaining);
+			continue;
+		}
 
 		spin_lock_irqsave(&ss_info->link_up_lock, flags);
 		if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
-				!ss_info_channel->cb_data ||
 				!ss_info_channel->link_up ||
-				ss_info_channel->cb_data->event
+				ss_leaf_entry->cb_data->event
 						!= GLINK_CONNECTED) {
 
 			GLINK_SSR_LOG(
@@ -527,6 +572,8 @@
 
 			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
 			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
 			continue;
 		}
 		spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
@@ -537,6 +584,8 @@
 			GLINK_SSR_ERR(
 				"%s %s: Could not allocate do_cleanup_msg\n",
 				"<SSR>", __func__);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
 			return -ENOMEM;
 		}
 
@@ -568,6 +617,8 @@
 						__func__);
 			}
 			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
 			continue;
 		}
 
@@ -597,10 +648,12 @@
 						__func__);
 			}
 			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
 			continue;
 		}
-
 		sequence_number++;
+		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
 	}
 
 	wait_ret = wait_event_timeout(waitqueue,
@@ -609,6 +662,21 @@
 
 	list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
 			notify_list_node) {
+		ss_info_channel =
+			get_info_for_subsystem(ss_leaf_entry->ssr_name);
+		if (ss_info_channel == NULL) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: unable to find subsystem name\n",
+					__func__);
+			continue;
+		}
+
+		ss_leaf_entry->cb_data = check_and_get_cb_data(
+							ss_info_channel);
+		if (!ss_leaf_entry->cb_data) {
+			GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+			continue;
+		}
 		if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
 				&& !ss_leaf_entry->cb_data->responded) {
 			GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
@@ -627,6 +695,7 @@
 
 		if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
 			ss_leaf_entry->cb_data->responded = false;
+		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
 	}
 	complete(&notifications_successful_complete);
 	return 0;
@@ -645,6 +714,7 @@
 	struct glink_open_config open_cfg;
 	struct ssr_notify_data *cb_data = NULL;
 	void *handle = NULL;
+	unsigned long flags;
 
 	if (!ss_info) {
 		GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
@@ -661,7 +731,10 @@
 	cb_data->responded = false;
 	cb_data->event = GLINK_SSR_EVENT_INIT;
 	cb_data->edge = ss_info->edge;
+	spin_lock_irqsave(&ss_info->cb_lock, flags);
 	ss_info->cb_data = cb_data;
+	kref_init(&cb_data->cb_kref);
+	spin_unlock_irqrestore(&ss_info->cb_lock, flags);
 
 	memset(&open_cfg, 0, sizeof(struct glink_open_config));
 
@@ -877,6 +950,7 @@
 	ss_info->link_state_handle = NULL;
 	ss_info->cb_data = NULL;
 	spin_lock_init(&ss_info->link_up_lock);
+	spin_lock_init(&ss_info->cb_lock);
 
 	nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
 	if (!nb) {
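
For reference, the glink_ssr changes above adopt the standard kref-under-spinlock pattern: look the pointer up under the lock, bail out if it is gone, pin it with kref_get(), and balance every successful lookup with a kref_put() that frees on the last reference. Below is a minimal, self-contained sketch of that pattern; the struct and function names are illustrative stand-ins, not the driver's own.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative stand-ins for ssr_notify_data / subsys_info. */
struct demo_cb_data {
	struct kref ref;
	int event;
};

struct demo_owner {
	spinlock_t lock;
	struct demo_cb_data *cb;
};

static void demo_cb_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_cb_data, ref));
}

/* Install a new cb_data; the kref starts out at one reference. */
static void demo_install_cb(struct demo_owner *o, struct demo_cb_data *cb)
{
	unsigned long flags;

	kref_init(&cb->ref);
	spin_lock_irqsave(&o->lock, flags);
	o->cb = cb;
	spin_unlock_irqrestore(&o->lock, flags);
}

/* Look up under the lock, bail if gone, otherwise pin a reference. */
static struct demo_cb_data *demo_get_cb(struct demo_owner *o)
{
	struct demo_cb_data *cb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	if (o->cb) {
		kref_get(&o->cb->ref);
		cb = o->cb;
	}
	spin_unlock_irqrestore(&o->lock, flags);
	return cb;
}

/* Every successful demo_get_cb() is balanced by exactly one put. */
static void demo_put_cb(struct demo_cb_data *cb)
{
	kref_put(&cb->ref, demo_cb_release);
}
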
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 91d6349..c950367 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -337,8 +337,8 @@
 			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
 				cur_bcm->node_vec[ACTIVE_CTX].vec_a,
 				cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
-			k++;
 			last_tcs = k;
+			k++;
 			cur_bcm->updated = true;
 		}
 	}
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index fb3d7d9..b71ce6b 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -75,6 +75,10 @@
 #define MSS_RESTART_ID			0xA
 
 #define MSS_MAGIC			0XAABADEAD
+
+#define MSS_PDC_OFFSET			8
+#define MSS_PDC_MASK			BIT(MSS_PDC_OFFSET)
+
 enum scm_cmd {
 	PAS_MEM_SETUP_CMD = 2,
 };
@@ -204,6 +208,33 @@
 		clk_disable_unprepare(drv->ahb_clk);
 }
 
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+	u32 val = 0;
+
+	if (drv->pdc_sync) {
+		val = readl_relaxed(drv->pdc_sync);
+		if (pdc_sync)
+			val |= MSS_PDC_MASK;
+		else
+			val &= ~MSS_PDC_MASK;
+		writel_relaxed(val, drv->pdc_sync);
+		/* Ensure PDC is written before next write */
+		wmb();
+		udelay(2);
+	}
+}
+
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+	if (drv->alt_reset) {
+		writel_relaxed(val, drv->alt_reset);
+		/* Ensure alt reset is written before restart reg */
+		wmb();
+		udelay(2);
+	}
+}
+
 static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
 {
 	int ret = 0;
@@ -235,6 +266,32 @@
 	return ret;
 }
 
+static int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	pil_mss_pdc_sync(drv, true);
+	pil_mss_alt_reset(drv, 1);
+	ret = pil_mss_restart_reg(drv, 1);
+
+	return ret;
+}
+
+static int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		return ret;
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	pil_mss_alt_reset(drv, 0);
+	pil_mss_pdc_sync(drv, false);
+
+	return ret;
+}
+
 static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
 {
 	struct device *dev = drv->desc.dev;
@@ -304,7 +361,10 @@
 									ret);
 	}
 
-	ret = pil_mss_restart_reg(drv, 1);
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
 
 	if (drv->is_booted) {
 		pil_mss_disable_clks(drv);
@@ -450,6 +510,7 @@
 {
 	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
 	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	u32 debug_val;
 	int ret;
 
 	if (drv->mba_dp_phys)
@@ -463,15 +524,22 @@
 	if (ret)
 		goto err_power;
 
-	/* Deassert reset to subsystem and wait for propagation */
-	ret = pil_mss_restart_reg(drv, 0);
-	if (ret)
-		goto err_restart;
-
 	ret = pil_mss_enable_clks(drv);
 	if (ret)
 		goto err_clks;
 
+	/* Save state of modem debug register before full reset */
+	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Assert reset to subsystem */
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
+	if (ret)
+		goto err_restart;
+
+	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
 	if (modem_dbg_cfg)
 		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
 
@@ -519,12 +587,11 @@
 
 err_q6v5_reset:
 	modem_log_rmb_regs(drv->rmb_base);
+err_restart:
 	pil_mss_disable_clks(drv);
 	if (drv->ahb_clk_vote)
 		clk_disable_unprepare(drv->ahb_clk);
 err_clks:
-	pil_mss_restart_reg(drv, 1);
-err_restart:
 	pil_mss_power_down(drv);
 err_power:
 	return ret;
@@ -582,6 +649,7 @@
 		}
 		drv->dp_size = dp_fw->size;
 		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
 	}
 
 	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
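
The "Wait 6 32kHz sleep cycles" comments above pair with udelay(200) because six cycles of the nominally 32.768 kHz sleep clock come to roughly 183 us (187.5 us at a flat 32 kHz), and 200 us rounds that up with a little margin. A throwaway user-space check of that arithmetic, purely illustrative:

#include <stdio.h>

int main(void)
{
	const double sleep_clk_hz = 32768.0;	/* nominal sleep clock */
	const double cycles = 6.0;		/* cycles the reset needs */
	double us = cycles / sleep_clk_hz * 1e6;

	/* Prints ~183.1; the driver uses udelay(200) for headroom. */
	printf("6 sleep-clock cycles = %.1f us\n", us);
	return 0;
}
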
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index bbde4b6..df0c609c 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -284,6 +284,20 @@
 	if (!q6->restart_reg)
 		return -ENOMEM;
 
+	q6->pdc_sync = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+	if (res) {
+		q6->pdc_sync = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
+	q6->alt_reset = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+	if (res) {
+		q6->alt_reset = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
 	q6->vreg = NULL;
 
 	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
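
The pdc_sync and alt_reset regions added above are optional: probe maps them only when a matching named resource exists, and the pil-msa.c helpers no-op while the pointer stays NULL. The map-if-present idiom in isolation, with a hypothetical helper name and resource argument:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *demo_optional_iomap(struct platform_device *pdev,
					 const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return NULL;	/* resource absent: feature stays off */

	/*
	 * devm_ioremap() returns NULL on failure, which callers of this
	 * helper treat the same way as "not present".
	 */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
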
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 1725253..9b4c811 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -44,6 +44,8 @@
 	void __iomem *axi_halt_mss;
 	void __iomem *axi_halt_nc;
 	void __iomem *restart_reg;
+	void __iomem *pdc_sync;
+	void __iomem *alt_reset;
 	struct regulator *vreg;
 	struct regulator *vreg_cx;
 	struct regulator *vreg_mx;
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 8c5b0d0..9fdd63a 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -1,7 +1,5 @@
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
-obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o
+obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o
 obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
 obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
 obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c
deleted file mode 100644
index 5c296f66..0000000
--- a/drivers/soc/qcom/qdsp6v2/apr_tal.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* Copyright (c) 2010-2011, 2013-2014, 2016-2017 The Linux Foundation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <soc/qcom/smd.h>
-#include <linux/qdsp6v2/apr_tal.h>
-
-static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-};
-
-struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-
-int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int w_len;
-	unsigned long flags;
-
-	spin_lock_irqsave(&apr_ch->w_lock, flags);
-	if (smd_write_avail(apr_ch->ch) < len) {
-		spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-		return -EAGAIN;
-	}
-
-	w_len = smd_write(apr_ch->ch, data, len);
-	spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-
-	pr_debug("apr_tal:w_len = %d\n", w_len);
-
-	if (w_len != len) {
-		pr_err("apr_tal: Error in write\n");
-		return -ENETRESET;
-	}
-	return w_len;
-}
-
-int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int rc = 0, retries = 0;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	do {
-		if (rc == -EAGAIN)
-			udelay(50);
-
-		rc = __apr_tal_write(apr_ch, data, pkt_priv, len);
-	} while (rc == -EAGAIN && retries++ < 300);
-
-	if (rc == -EAGAIN)
-		pr_err("apr_tal: TIMEOUT for write\n");
-
-	return rc;
-}
-
-static void apr_tal_notify(void *priv, unsigned int event)
-{
-	struct apr_svc_ch_dev *apr_ch = priv;
-	int len, r_len, sz;
-	int pkt_cnt = 0;
-	unsigned long flags;
-
-	pr_debug("event = %d\n", event);
-	switch (event) {
-	case SMD_EVENT_DATA:
-		pkt_cnt = 0;
-		spin_lock_irqsave(&apr_ch->lock, flags);
-check_pending:
-		len = smd_read_avail(apr_ch->ch);
-		if (len < 0) {
-			pr_err("apr_tal: Invalid Read Event :%d\n", len);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		sz = smd_cur_packet_size(apr_ch->ch);
-		if (sz < 0) {
-			pr_debug("pkt size is zero\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		if (!len && !sz && !pkt_cnt)
-			goto check_write_avail;
-		if (!len) {
-			pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len);
-		if (len != r_len) {
-			pr_err("apr_tal: Invalid Read\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		pkt_cnt++;
-		pr_debug("%d %d %d\n", len, sz, pkt_cnt);
-		if (apr_ch->func)
-			apr_ch->func(apr_ch->data, r_len, apr_ch->priv);
-		goto check_pending;
-check_write_avail:
-		if (smd_write_avail(apr_ch->ch))
-			wake_up(&apr_ch->wait);
-		spin_unlock_irqrestore(&apr_ch->lock, flags);
-		break;
-	case SMD_EVENT_OPEN:
-		pr_debug("apr_tal: SMD_EVENT_OPEN\n");
-		apr_ch->smd_state = 1;
-		wake_up(&apr_ch->wait);
-		break;
-	case SMD_EVENT_CLOSE:
-		pr_debug("apr_tal: SMD_EVENT_CLOSE\n");
-		break;
-	}
-}
-
-int apr_tal_rx_intents_config(struct apr_svc_ch_dev *apr_ch,
-			int num_of_intents, uint32_t size)
-{
-	/* Rx intents configuration is required for Glink
-	 * but not for SMD. No-op for this function.
-	 */
-	return 0;
-}
-
-struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
-				uint32_t dl, apr_svc_cb_fn func, void *priv)
-{
-	int rc;
-
-	if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
-						(dl >= APR_DL_MAX)) {
-		pr_err("apr_tal: Invalid params\n");
-		return NULL;
-	}
-
-	if (apr_svc_ch[dl][dest][clnt].ch) {
-		pr_err("apr_tal: This channel alreday openend\n");
-		return NULL;
-	}
-
-	mutex_lock(&apr_svc_ch[dl][dest][clnt].m_lock);
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].dest,
-			apr_svc_ch[dl][dest][clnt].dest_state,
-				msecs_to_jiffies(APR_OPEN_TIMEOUT_MS));
-		if (rc == 0) {
-			pr_err("apr_tal:open timeout\n");
-			mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-			return NULL;
-		}
-		pr_debug("apr_tal:Wakeup done\n");
-		apr_svc_ch[dl][dest][clnt].dest_state = 0;
-	}
-	rc = smd_named_open_on_edge(svc_names[dest][clnt], dest,
-			&apr_svc_ch[dl][dest][clnt].ch,
-			&apr_svc_ch[dl][dest][clnt],
-			apr_tal_notify);
-	if (rc < 0) {
-		pr_err("apr_tal: smd_open failed %s\n",
-					svc_names[dest][clnt]);
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		return NULL;
-	}
-	rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].wait,
-		(apr_svc_ch[dl][dest][clnt].smd_state == 1), 5 * HZ);
-	if (rc == 0) {
-		pr_err("apr_tal:TIMEOUT for OPEN event\n");
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		apr_tal_close(&apr_svc_ch[dl][dest][clnt]);
-		return NULL;
-	}
-
-	smd_disable_read_intr(apr_svc_ch[dl][dest][clnt].ch);
-
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		apr_svc_ch[dl][dest][clnt].dest_state = 1;
-		pr_debug("apr_tal:Waiting for apr svc init\n");
-		msleep(200);
-		pr_debug("apr_tal:apr svc init done\n");
-	}
-	apr_svc_ch[dl][dest][clnt].smd_state = 0;
-
-	apr_svc_ch[dl][dest][clnt].func = func;
-	apr_svc_ch[dl][dest][clnt].priv = priv;
-	mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-
-	return &apr_svc_ch[dl][dest][clnt];
-}
-
-int apr_tal_close(struct apr_svc_ch_dev *apr_ch)
-{
-	int r;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	mutex_lock(&apr_ch->m_lock);
-	r = smd_close(apr_ch->ch);
-	apr_ch->ch = NULL;
-	apr_ch->func = NULL;
-	apr_ch->priv = NULL;
-	mutex_unlock(&apr_ch->m_lock);
-	return r;
-}
-
-static int apr_smd_probe(struct platform_device *pdev)
-{
-	int dest;
-	int clnt;
-
-	if (pdev->id == APR_DEST_MODEM) {
-		pr_info("apr_tal:Modem Is Up\n");
-		dest = APR_DEST_MODEM;
-		if (!strcmp(pdev->name, "apr_audio_svc"))
-			clnt = APR_CLIENT_AUDIO;
-		else
-			clnt = APR_CLIENT_VOICE;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else if (pdev->id == APR_DEST_QDSP6) {
-		pr_info("apr_tal:Q6 Is Up\n");
-		dest = APR_DEST_QDSP6;
-		clnt = APR_CLIENT_AUDIO;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else
-		pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id);
-
-	return 0;
-}
-
-static struct platform_driver apr_q6_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_audio_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static struct platform_driver apr_modem_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_voice_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init apr_tal_init(void)
-{
-	int i, j, k;
-
-	for (i = 0; i < APR_DL_MAX; i++)
-		for (j = 0; j < APR_DEST_MAX; j++)
-			for (k = 0; k < APR_CLIENT_MAX; k++) {
-				init_waitqueue_head(&apr_svc_ch[i][j][k].wait);
-				init_waitqueue_head(&apr_svc_ch[i][j][k].dest);
-				spin_lock_init(&apr_svc_ch[i][j][k].lock);
-				spin_lock_init(&apr_svc_ch[i][j][k].w_lock);
-				mutex_init(&apr_svc_ch[i][j][k].m_lock);
-			}
-	platform_driver_register(&apr_q6_driver);
-	platform_driver_register(&apr_modem_driver);
-	return 0;
-}
-device_initcall(apr_tal_init);
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
deleted file mode 100644
index f3b1b83..0000000
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ /dev/null
@@ -1,837 +0,0 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/qdsp6v2/apr_tal.h>
-#include <linux/qdsp6v2/apr.h>
-#include <sound/voice_svc.h>
-
-#define MINOR_NUMBER 1
-#define APR_MAX_RESPONSE 10
-#define TIMEOUT_MS 1000
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-
-struct voice_svc_device {
-	struct cdev *cdev;
-	struct device *dev;
-	int major;
-};
-
-struct voice_svc_prvt {
-	void *apr_q6_mvm;
-	void *apr_q6_cvs;
-	uint16_t response_count;
-	struct list_head response_queue;
-	wait_queue_head_t response_wait;
-	spinlock_t response_lock;
-	/*
-	 * This mutex ensures responses are processed in sequential order and
-	 * that no two threads access and free the same response at the same
-	 * time.
-	 */
-	struct mutex response_mutex_lock;
-};
-
-struct apr_data {
-	struct apr_hdr hdr;
-	__u8 payload[0];
-} __packed;
-
-struct apr_response_list {
-	struct list_head list;
-	struct voice_svc_cmd_response resp;
-};
-
-static struct voice_svc_device *voice_svc_dev;
-static struct class *voice_svc_class;
-static bool reg_dummy_sess;
-static void *dummy_q6_mvm;
-static void *dummy_q6_cvs;
-dev_t device_num;
-
-static int voice_svc_dummy_reg(void);
-static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data,
-					void *priv);
-
-static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
-{
-	struct voice_svc_prvt *prtd;
-	struct apr_response_list *response_list;
-	unsigned long spin_flags;
-
-	if ((data == NULL) || (priv == NULL)) {
-		pr_err("%s: data or priv is NULL\n", __func__);
-
-		return -EINVAL;
-	}
-
-	prtd = (struct voice_svc_prvt *)priv;
-	if (prtd == NULL) {
-		pr_err("%s: private data is NULL\n", __func__);
-
-		return -EINVAL;
-	}
-
-	pr_debug("%s: data->opcode %x\n", __func__,
-		 data->opcode);
-
-	if (data->opcode == RESET_EVENTS) {
-		if (data->reset_proc == APR_DEST_QDSP6) {
-			pr_debug("%s: Received ADSP reset event\n", __func__);
-
-			if (prtd->apr_q6_mvm != NULL) {
-				apr_reset(prtd->apr_q6_mvm);
-				prtd->apr_q6_mvm = NULL;
-			}
-
-			if (prtd->apr_q6_cvs != NULL) {
-				apr_reset(prtd->apr_q6_cvs);
-				prtd->apr_q6_cvs = NULL;
-			}
-		} else if (data->reset_proc == APR_DEST_MODEM) {
-			pr_debug("%s: Received Modem reset event\n", __func__);
-		}
-		/* Set the remaining member variables to default values
-		 * for RESET_EVENTS
-		 */
-		data->payload_size = 0;
-		data->payload = NULL;
-		data->src_port = 0;
-		data->dest_port = 0;
-		data->token = 0;
-	}
-
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	if (prtd->response_count < APR_MAX_RESPONSE) {
-		response_list = kmalloc(sizeof(struct apr_response_list) +
-					data->payload_size, GFP_ATOMIC);
-		if (response_list == NULL) {
-			spin_unlock_irqrestore(&prtd->response_lock,
-					       spin_flags);
-			return -ENOMEM;
-		}
-
-		response_list->resp.src_port = data->src_port;
-
-		/* Reverting the bit manipulation done in voice_svc_update_hdr
-		 * to the src_port which is returned to us as dest_port.
-		 */
-		response_list->resp.dest_port = ((data->dest_port) >> 8);
-		response_list->resp.token = data->token;
-		response_list->resp.opcode = data->opcode;
-		response_list->resp.payload_size = data->payload_size;
-		if (data->payload != NULL && data->payload_size > 0) {
-			memcpy(response_list->resp.payload, data->payload,
-			       data->payload_size);
-		}
-
-		list_add_tail(&response_list->list, &prtd->response_queue);
-		prtd->response_count++;
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-
-		wake_up(&prtd->response_wait);
-	} else {
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-		pr_err("%s: Response dropped since the queue is full\n",
-		       __func__);
-	}
-
-	return 0;
-}
-
-static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv)
-{
-	/* Do Nothing */
-	return 0;
-}
-
-static void voice_svc_update_hdr(struct voice_svc_cmd_request *apr_req_data,
-				 struct apr_data *aprdata)
-{
-
-	aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
-				       APR_HDR_LEN(sizeof(struct apr_hdr)),
-				       APR_PKT_VER);
-	/* Bit manipulation is done on src_port so that a unique ID is sent.
-	 * This manipulation can be used in the future where the same service
-	 * is tried to open multiple times with the same src_port. At that
-	 * time 0x0001 can be replaced with other values depending on the
-	 * count.
-	 */
-	aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001);
-	aprdata->hdr.dest_port = apr_req_data->dest_port;
-	aprdata->hdr.token = apr_req_data->token;
-	aprdata->hdr.opcode = apr_req_data->opcode;
-	aprdata->hdr.pkt_size  = APR_PKT_SIZE(APR_HDR_SIZE,
-					apr_req_data->payload_size);
-	memcpy(aprdata->payload, apr_req_data->payload,
-	       apr_req_data->payload_size);
-}
-
-static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
-			      struct voice_svc_prvt *prtd)
-{
-	int ret = 0;
-	void *apr_handle = NULL;
-	struct apr_data *aprdata = NULL;
-	uint32_t user_payload_size;
-	uint32_t payload_size;
-
-	pr_debug("%s\n", __func__);
-
-	if (apr_request == NULL) {
-		pr_err("%s: apr_request is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	user_payload_size = apr_request->payload_size;
-	payload_size = sizeof(struct apr_data) + user_payload_size;
-
-	if (payload_size <= user_payload_size) {
-		pr_err("%s: invalid payload size ( 0x%x ).\n",
-			__func__, user_payload_size);
-		ret = -EINVAL;
-		goto done;
-	} else {
-		aprdata = kmalloc(payload_size, GFP_KERNEL);
-		if (aprdata == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-	}
-
-	voice_svc_update_hdr(apr_request, aprdata);
-
-	if (!strcmp(apr_request->svc_name, VOICE_SVC_CVS_STR)) {
-		apr_handle = prtd->apr_q6_cvs;
-	} else if (!strcmp(apr_request->svc_name, VOICE_SVC_MVM_STR)) {
-		apr_handle = prtd->apr_q6_mvm;
-	} else {
-		pr_err("%s: Invalid service %.*s\n", __func__,
-			MAX_APR_SERVICE_NAME_LEN, apr_request->svc_name);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata);
-
-	if (ret < 0) {
-		pr_err("%s: Fail in sending request %d\n",
-			__func__, ret);
-		ret = -EINVAL;
-	} else {
-		pr_debug("%s: apr packet sent successfully %d\n",
-			 __func__, ret);
-		ret = 0;
-	}
-
-done:
-	kfree(aprdata);
-	return ret;
-}
-static int voice_svc_reg(char *svc, uint32_t src_port,
-			 struct voice_svc_prvt *prtd, void **handle)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	if (handle == NULL) {
-		pr_err("%s: handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (*handle != NULL) {
-		pr_err("%s: svc handle not NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (src_port == (APR_MAX_PORTS - 1)) {
-		pr_err("%s: SRC port reserved for dummy session\n", __func__);
-		pr_err("%s: Unable to register %s\n", __func__, svc);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	*handle = apr_register("ADSP",
-			       svc, qdsp_apr_callback,
-			       ((src_port) << 8 | 0x0001),
-			       prtd);
-
-	if (*handle == NULL) {
-		pr_err("%s: Unable to register %s\n",
-		       __func__, svc);
-
-		ret = -EFAULT;
-		goto done;
-	}
-	pr_debug("%s: Register %s successful\n",
-		__func__, svc);
-done:
-	return ret;
-}
-
-static int voice_svc_dereg(char *svc, void **handle)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	if (handle == NULL) {
-		pr_err("%s: handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (*handle == NULL) {
-		pr_err("%s: svc handle is NULL\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	ret = apr_deregister(*handle);
-	if (ret) {
-		pr_err("%s: Unable to deregister service %s; error: %d\n",
-		       __func__, svc, ret);
-
-		goto done;
-	}
-	*handle = NULL;
-	pr_debug("%s: deregister %s successful\n", __func__, svc);
-
-done:
-	return ret;
-}
-
-static int process_reg_cmd(struct voice_svc_register *apr_reg_svc,
-			   struct voice_svc_prvt *prtd)
-{
-	int ret = 0;
-	char *svc = NULL;
-	void **handle = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_MVM_STR)) {
-		svc = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_mvm;
-	} else if (!strcmp(apr_reg_svc->svc_name, VOICE_SVC_CVS_STR)) {
-		svc = VOICE_SVC_CVS_STR;
-		handle = &prtd->apr_q6_cvs;
-	} else {
-		pr_err("%s: Invalid Service: %.*s\n", __func__,
-			MAX_APR_SERVICE_NAME_LEN, apr_reg_svc->svc_name);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (apr_reg_svc->reg_flag) {
-		ret = voice_svc_reg(svc, apr_reg_svc->src_port, prtd,
-				    handle);
-	} else if (!apr_reg_svc->reg_flag) {
-		ret = voice_svc_dereg(svc, handle);
-	}
-
-done:
-	return ret;
-}
-
-static ssize_t voice_svc_write(struct file *file, const char __user *buf,
-			       size_t count, loff_t *ppos)
-{
-	int ret = 0;
-	struct voice_svc_prvt *prtd;
-	struct voice_svc_write_msg *data = NULL;
-	uint32_t cmd;
-	struct voice_svc_register *register_data = NULL;
-	struct voice_svc_cmd_request *request_data = NULL;
-	uint32_t request_payload_size;
-
-	pr_debug("%s\n", __func__);
-
-	/*
-	 * Check if enough memory is allocated to parse the message type.
-	 * Will check there is enough to hold the payload later.
-	 */
-	if (count >= sizeof(struct voice_svc_write_msg)) {
-		data = kmalloc(count, GFP_KERNEL);
-	} else {
-		pr_debug("%s: invalid data size\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (data == NULL) {
-		pr_err("%s: data kmalloc failed.\n", __func__);
-
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	ret = copy_from_user(data, buf, count);
-	if (ret) {
-		pr_err("%s: copy_from_user failed %d\n", __func__, ret);
-
-		ret = -EPERM;
-		goto done;
-	}
-
-	cmd = data->msg_type;
-	prtd = (struct voice_svc_prvt *) file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	switch (cmd) {
-	case MSG_REGISTER:
-		/*
-		 * Check that count reflects the expected size to ensure
-		 * sufficient memory was allocated. Since voice_svc_register
-		 * has a static size, this should be exact.
-		 */
-		if (count == (sizeof(struct voice_svc_write_msg) +
-			      sizeof(struct voice_svc_register))) {
-			register_data =
-				(struct voice_svc_register *)data->payload;
-			if (register_data == NULL) {
-				pr_err("%s: register data is NULL", __func__);
-				ret = -EINVAL;
-				goto done;
-			}
-			ret = process_reg_cmd(register_data, prtd);
-			if (!ret)
-				ret = count;
-		} else {
-			pr_err("%s: invalid data payload size for register command\n",
-				__func__);
-			ret = -EINVAL;
-			goto done;
-		}
-		break;
-	case MSG_REQUEST:
-		/*
-		 * Check that count reflects the expected size to ensure
-		 * sufficient memory was allocated. Since voice_svc_cmd_request
-		 * has a variable size, check the minimum value count must be to
-		 * parse the message request then check the minimum size to hold
-		 * the payload of the message request.
-		 */
-		if (count >= (sizeof(struct voice_svc_write_msg) +
-			      sizeof(struct voice_svc_cmd_request))) {
-			request_data =
-				(struct voice_svc_cmd_request *)data->payload;
-			if (request_data == NULL) {
-				pr_err("%s: request data is NULL", __func__);
-				ret = -EINVAL;
-				goto done;
-			}
-
-			request_payload_size = request_data->payload_size;
-
-			if (count >= (sizeof(struct voice_svc_write_msg) +
-				      sizeof(struct voice_svc_cmd_request) +
-				      request_payload_size)) {
-				ret = voice_svc_send_req(request_data, prtd);
-				if (!ret)
-					ret = count;
-			} else {
-				pr_err("%s: invalid request payload size\n",
-					__func__);
-				ret = -EINVAL;
-				goto done;
-			}
-		} else {
-			pr_err("%s: invalid data payload size for request command\n",
-				__func__);
-			ret = -EINVAL;
-			goto done;
-		}
-		break;
-	default:
-		pr_debug("%s: Invalid command: %u\n", __func__, cmd);
-		ret = -EINVAL;
-	}
-
-done:
-	kfree(data);
-	return ret;
-}
-
-static ssize_t voice_svc_read(struct file *file, char __user *arg,
-			      size_t count, loff_t *ppos)
-{
-	int ret = 0;
-	struct voice_svc_prvt *prtd;
-	struct apr_response_list *resp;
-	unsigned long spin_flags;
-	int size;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = (struct voice_svc_prvt *)file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	mutex_lock(&prtd->response_mutex_lock);
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	if (list_empty(&prtd->response_queue)) {
-		spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-		pr_debug("%s: wait for a response\n", __func__);
-
-		ret = wait_event_interruptible_timeout(prtd->response_wait,
-					!list_empty(&prtd->response_queue),
-					msecs_to_jiffies(TIMEOUT_MS));
-		if (ret == 0) {
-			pr_debug("%s: Read timeout\n", __func__);
-
-			ret = -ETIMEDOUT;
-			goto unlock;
-		} else if (ret > 0 && !list_empty(&prtd->response_queue)) {
-			pr_debug("%s: Interrupt received for response\n",
-				 __func__);
-		} else if (ret < 0) {
-			pr_debug("%s: Interrupted by SIGNAL %d\n",
-				 __func__, ret);
-
-			goto unlock;
-		}
-
-		spin_lock_irqsave(&prtd->response_lock, spin_flags);
-	}
-
-	resp = list_first_entry(&prtd->response_queue,
-				struct apr_response_list, list);
-
-	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-
-	size = resp->resp.payload_size +
-	       sizeof(struct voice_svc_cmd_response);
-
-	if (count < size) {
-		pr_err("%s: Invalid payload size %zd, %d\n",
-		       __func__, count, size);
-
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	if (!access_ok(VERIFY_WRITE, arg, size)) {
-		pr_err("%s: Access denied to write\n",
-		       __func__);
-
-		ret = -EPERM;
-		goto unlock;
-	}
-
-	ret = copy_to_user(arg, &resp->resp,
-			 sizeof(struct voice_svc_cmd_response) +
-			 resp->resp.payload_size);
-	if (ret) {
-		pr_err("%s: copy_to_user failed %d\n", __func__, ret);
-
-		ret = -EPERM;
-		goto unlock;
-	}
-
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	list_del(&resp->list);
-	prtd->response_count--;
-	kfree(resp);
-
-	spin_unlock_irqrestore(&prtd->response_lock,
-				spin_flags);
-
-	ret = count;
-
-unlock:
-	mutex_unlock(&prtd->response_mutex_lock);
-done:
-	return ret;
-}
-
-static int voice_svc_dummy_reg(void)
-{
-	uint32_t src_port = APR_MAX_PORTS - 1;
-
-	pr_debug("%s\n", __func__);
-	dummy_q6_mvm = apr_register("ADSP", "MVM",
-				qdsp_dummy_apr_callback,
-				src_port,
-				NULL);
-	if (dummy_q6_mvm == NULL) {
-		pr_err("%s: Unable to register dummy MVM\n", __func__);
-		goto err;
-	}
-
-	dummy_q6_cvs = apr_register("ADSP", "CVS",
-				qdsp_dummy_apr_callback,
-				src_port,
-				NULL);
-	if (dummy_q6_cvs == NULL) {
-		pr_err("%s: Unable to register dummy CVS\n", __func__);
-		goto err;
-	}
-	return 0;
-err:
-	if (dummy_q6_mvm != NULL) {
-		apr_deregister(dummy_q6_mvm);
-		dummy_q6_mvm = NULL;
-	}
-	return -EINVAL;
-}
-
-static int voice_svc_open(struct inode *inode, struct file *file)
-{
-	struct voice_svc_prvt *prtd = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = kmalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
-
-	if (prtd == NULL)
-		return -ENOMEM;
-
-	memset(prtd, 0, sizeof(struct voice_svc_prvt));
-	prtd->apr_q6_cvs = NULL;
-	prtd->apr_q6_mvm = NULL;
-	prtd->response_count = 0;
-	INIT_LIST_HEAD(&prtd->response_queue);
-	init_waitqueue_head(&prtd->response_wait);
-	spin_lock_init(&prtd->response_lock);
-	mutex_init(&prtd->response_mutex_lock);
-	file->private_data = (void *)prtd;
-
-	/* Current APR implementation doesn't support session based
-	 * multiple service registrations. The apr_deregister()
-	 * function sets the destination and client IDs to zero, if
-	 * deregister is called for a single service instance.
-	 * To avoid this, register for additional services.
-	 */
-	if (!reg_dummy_sess) {
-		voice_svc_dummy_reg();
-		reg_dummy_sess = 1;
-	}
-	return 0;
-}
-
-static int voice_svc_release(struct inode *inode, struct file *file)
-{
-	int ret = 0;
-	struct apr_response_list *resp = NULL;
-	unsigned long spin_flags;
-	struct voice_svc_prvt *prtd = NULL;
-	char *svc_name = NULL;
-	void **handle = NULL;
-
-	pr_debug("%s\n", __func__);
-
-	prtd = (struct voice_svc_prvt *)file->private_data;
-	if (prtd == NULL) {
-		pr_err("%s: prtd is NULL\n", __func__);
-
-		ret = -EINVAL;
-		goto done;
-	}
-
-	if (prtd->apr_q6_cvs != NULL) {
-		svc_name = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_cvs;
-		ret = voice_svc_dereg(svc_name, handle);
-		if (ret)
-			pr_err("%s: Failed to dereg CVS %d\n", __func__, ret);
-	}
-
-	if (prtd->apr_q6_mvm != NULL) {
-		svc_name = VOICE_SVC_MVM_STR;
-		handle = &prtd->apr_q6_mvm;
-		ret = voice_svc_dereg(svc_name, handle);
-		if (ret)
-			pr_err("%s: Failed to dereg MVM %d\n", __func__, ret);
-	}
-
-	mutex_lock(&prtd->response_mutex_lock);
-	spin_lock_irqsave(&prtd->response_lock, spin_flags);
-
-	while (!list_empty(&prtd->response_queue)) {
-		pr_debug("%s: Remove item from response queue\n", __func__);
-
-		resp = list_first_entry(&prtd->response_queue,
-					struct apr_response_list, list);
-		list_del(&resp->list);
-		prtd->response_count--;
-		kfree(resp);
-	}
-
-	spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
-	mutex_unlock(&prtd->response_mutex_lock);
-
-	mutex_destroy(&prtd->response_mutex_lock);
-
-	kfree(file->private_data);
-	file->private_data = NULL;
-
-done:
-	return ret;
-}
-
-static const struct file_operations voice_svc_fops = {
-	.owner =                THIS_MODULE,
-	.open =                 voice_svc_open,
-	.read =                 voice_svc_read,
-	.write =                voice_svc_write,
-	.release =              voice_svc_release,
-};
-
-
-static int voice_svc_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-
-	voice_svc_dev = devm_kzalloc(&pdev->dev,
-				  sizeof(struct voice_svc_device), GFP_KERNEL);
-	if (!voice_svc_dev) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER,
-				  VOICE_SVC_DRIVER_NAME);
-	if (ret) {
-		pr_err("%s: Failed to alloc chrdev\n", __func__);
-		ret = -ENODEV;
-		goto chrdev_err;
-	}
-
-	voice_svc_dev->major = MAJOR(device_num);
-	voice_svc_class = class_create(THIS_MODULE, VOICE_SVC_DRIVER_NAME);
-	if (IS_ERR(voice_svc_class)) {
-		ret = PTR_ERR(voice_svc_class);
-		pr_err("%s: Failed to create class; err = %d\n", __func__,
-			ret);
-		goto class_err;
-	}
-
-	voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
-					   NULL, VOICE_SVC_DRIVER_NAME);
-	if (IS_ERR(voice_svc_dev->dev)) {
-		ret = PTR_ERR(voice_svc_dev->dev);
-		pr_err("%s: Failed to create device; err = %d\n", __func__,
-			ret);
-		goto dev_err;
-	}
-
-	voice_svc_dev->cdev = cdev_alloc();
-	if (!voice_svc_dev->cdev) {
-		pr_err("%s: Failed to alloc cdev\n", __func__);
-		ret = -ENOMEM;
-		goto cdev_alloc_err;
-	}
-
-	cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
-	ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
-	if (ret) {
-		pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
-			ret);
-		goto add_err;
-	}
-	pr_debug("%s: Device created\n", __func__);
-	goto done;
-
-add_err:
-	cdev_del(voice_svc_dev->cdev);
-cdev_alloc_err:
-	device_destroy(voice_svc_class, device_num);
-dev_err:
-	class_destroy(voice_svc_class);
-class_err:
-	unregister_chrdev_region(0, MINOR_NUMBER);
-chrdev_err:
-	kfree(voice_svc_dev);
-done:
-	return ret;
-}
-
-static int voice_svc_remove(struct platform_device *pdev)
-{
-	pr_debug("%s\n", __func__);
-
-	cdev_del(voice_svc_dev->cdev);
-	kfree(voice_svc_dev->cdev);
-	device_destroy(voice_svc_class, device_num);
-	class_destroy(voice_svc_class);
-	unregister_chrdev_region(0, MINOR_NUMBER);
-	kfree(voice_svc_dev);
-
-	return 0;
-}
-
-static const struct of_device_id voice_svc_of_match[] = {
-	{.compatible = "qcom,msm-voice-svc"},
-	{ }
-};
-MODULE_DEVICE_TABLE(of, voice_svc_of_match);
-
-static struct platform_driver voice_svc_driver = {
-	.probe          = voice_svc_probe,
-	.remove         = voice_svc_remove,
-	.driver         = {
-		.name   = "msm-voice-svc",
-		.owner  = THIS_MODULE,
-		.of_match_table = voice_svc_of_match,
-	},
-};
-
-static int __init voice_svc_init(void)
-{
-	pr_debug("%s\n", __func__);
-
-	return platform_driver_register(&voice_svc_driver);
-}
-
-static void __exit voice_svc_exit(void)
-{
-	pr_debug("%s\n", __func__);
-
-	platform_driver_unregister(&voice_svc_driver);
-}
-
-module_init(voice_svc_init);
-module_exit(voice_svc_exit);
-
-MODULE_DESCRIPTION("Soc QDSP6v2 Voice Service driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index e30c159..81d0bb0 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -70,6 +71,7 @@
 	struct rpmh_msg *msg_pool;
 	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
 	bool dirty;
+	bool in_solver_mode;
 };
 
 struct rpmh_client {
@@ -458,10 +460,21 @@
 	int count = 0;
 	int ret, i, j, k;
 	bool complete_set;
+	unsigned long flags;
+	struct rpmh_mbox *rpm;
 
 	if (rpmh_standalone)
 		return 0;
 
+	/* Do not allow setting wake votes when in solver mode */
+	rpm = rc->rpmh;
+	spin_lock_irqsave(&rpm->lock, flags);
+	if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) {
+		spin_unlock_irqrestore(&rpm->lock, flags);
+		return -EIO;
+	}
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
 	while (n[count++])
 		;
 	count--;
@@ -491,8 +504,12 @@
 	/* Create async request batches */
 	for (i = 0; i < count; i++) {
 		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
-		if (IS_ERR_OR_NULL(rpm_msg[i]))
+		if (IS_ERR_OR_NULL(rpm_msg[i])) {
+			/* Clean up our call by spoofing tx_done */
+			for (j = 0; j < i; j++)
+				rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
 			return PTR_ERR(rpm_msg[i]);
+		}
 		cmd += n[i];
 	}
 
@@ -505,10 +522,13 @@
 			rpm_msg[i]->wait_count = &wait_count;
 			/* Bypass caching and write to mailbox directly */
 			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
-			if (ret < 0)
-				return ret;
+			if (ret < 0) {
+				pr_err("Error(%d) sending RPM message addr=0x%x\n",
+					ret, rpm_msg[i]->msg.payload[0].addr);
+				break;
+			}
 		}
-		wait_event(waitq, atomic_read(&wait_count) == 0);
+		wait_event(waitq, atomic_read(&wait_count) == (count - i));
 	} else {
 		/* Send Sleep requests to the controller, expect no response */
 		for (i = 0; i < count; i++) {
@@ -526,6 +546,43 @@
 EXPORT_SYMBOL(rpmh_write_passthru);
 
 /**
+ * rpmh_mode_solver_set: Indicate that the RSC controller hardware has
+ * been configured to be in solver mode
+ *
+ * @rc: The RPMH handle
+ * @enable: Boolean value indicating if the controller is in solver mode.
+ *
+ * When solver mode is enabled, the passthru API cannot send wake votes,
+ * only awake and active votes.
+ */
+int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
+{
+	struct rpmh_mbox *rpm;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	if (rpmh_standalone)
+		return 0;
+
+	rpm = rc->rpmh;
+	do {
+		spin_lock_irqsave(&rpm->lock, flags);
+		if (mbox_controller_is_idle(rc->chan)) {
+			rpm->in_solver_mode = enable;
+			spin_unlock_irqrestore(&rpm->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&rpm->lock, flags);
+		udelay(10);
+	} while (1);
+
+	return 0;
+}
+EXPORT_SYMBOL(rpmh_mode_solver_set);
+
+/**
  * rpmh_write_control: Write async control commands to the controller
  *
  * @rc: The RPMh handle got from rpmh_get_dev_channel
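
A minimal sketch of how a caller might use the new rpmh_mode_solver_set() around solver-mode entry and exit, assuming it already owns an rpmh_client obtained via rpmh_get_dev_channel() as described in the driver's own kernel-doc. The function and header path follow this tree's conventions, but the surrounding logic and helper names are illustrative only:

#include <linux/errno.h>
#include <soc/qcom/rpmh.h>

/* Hardware has been switched into solver mode; mirror that in rpmh. */
static int demo_enter_solver(struct rpmh_client *rc)
{
	int ret;

	ret = rpmh_mode_solver_set(rc, true);
	if (ret)
		return ret;

	/*
	 * From here on, rpmh_write_passthru() calls that carry
	 * RPMH_WAKE_ONLY_STATE votes fail with -EIO until solver
	 * mode is cleared again.
	 */
	return 0;
}

static void demo_exit_solver(struct rpmh_client *rc)
{
	/* Note: rpmh_mode_solver_set() busy-waits for an idle mailbox. */
	rpmh_mode_solver_set(rc, false);
}
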
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 2855a15..d8c5a8f 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -56,9 +56,13 @@
 	 * Set up the wake up value offset from the current time.
 	 * Convert us to ns to allow div by 19.2 Mhz tick timer.
 	 */
-	sleep_val *= NSEC_PER_USEC;
-	do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
-	sleep_val += arch_counter_get_cntvct();
+	if (sleep_val) {
+		sleep_val *= NSEC_PER_USEC;
+		do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
+		sleep_val += arch_counter_get_cntvct();
+	} else {
+		sleep_val = ~0ULL;
+	}
 
 	return setup_wakeup(sleep_val);
 }
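
The reworked conversion still maps a microsecond sleep request onto absolute 19.2 MHz arch-timer ticks; the new branch simply treats a zero request as "no timed wakeup" by programming the all-ones value. The same arithmetic as a standalone sketch (plain C, helper name illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define NSEC_PER_SEC	1000000000ULL
#define ARCH_TIMER_HZ	19200000ULL	/* 19.2 MHz always-on timer */

/* Relative sleep time in us -> absolute tick deadline. */
static uint64_t demo_wakeup_ticks(uint64_t sleep_us, uint64_t now_ticks)
{
	if (!sleep_us)
		return ~0ULL;	/* no timed wakeup requested */

	/* us -> ns, then ns -> ticks at ~52 ns per tick. */
	return now_ticks +
	       (sleep_us * NSEC_PER_USEC) / (NSEC_PER_SEC / ARCH_TIMER_HZ);
}

int main(void)
{
	/* 1000 us from tick 0 lands near tick 19200. */
	printf("%llu\n", (unsigned long long)demo_wakeup_ticks(1000, 0));
	return 0;
}
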
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 37125c0..7da9211 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -909,13 +909,11 @@
 	struct cpufreq_cooling_device *cpufreq_dev;
 	char dev_name[THERMAL_NAME_LENGTH];
 	struct cpufreq_frequency_table *pos, *table;
-	struct cpumask temp_mask;
 	unsigned int freq, i, num_cpus;
 	int ret;
 	struct thermal_cooling_device_ops *cooling_ops;
 
-	cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
-	policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+	policy = cpufreq_cpu_get(cpumask_first(clip_cpus));
 	if (!policy) {
 		pr_debug("%s: CPUFreq policy not found\n", __func__);
 		return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index f0be6e9..984241f9 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -35,11 +35,6 @@
 
 #include "thermal_core.h"
 
-#define for_each_tz_sibling(pos, head)                                         \
-	for (pos = list_first_entry((head), struct __thermal_zone, list);\
-		&(pos->list) != (head);                                  \
-		pos = list_next_entry(pos, list))                        \
-
 /***   Private data structures to represent thermal device tree data ***/
 /**
  * struct __thermal_bind_param - a match between trip and cooling device
@@ -436,7 +431,7 @@
 	enum thermal_trip_type type = 0;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		for (trip = 0; trip < data->ntrips; trip++) {
 			of_thermal_get_trip_type(zone, trip, &type);
@@ -499,7 +494,7 @@
 	struct list_head *head;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		thermal_zone_device_update(zone, THERMAL_EVENT_UNSPECIFIED);
 	}
@@ -684,7 +679,7 @@
 void thermal_zone_of_sensor_unregister(struct device *dev,
 				       struct thermal_zone_device *tzd)
 {
-	struct __thermal_zone *tz;
+	struct __thermal_zone *tz, *next;
 	struct thermal_zone_device *pos;
 	struct list_head *head;
 
@@ -698,7 +693,7 @@
 		return;
 
 	head = &tz->senps->first_tz;
-	for_each_tz_sibling(tz, head) {
+	list_for_each_entry_safe(tz, next, head, list) {
 		pos = tz->tzd;
 		mutex_lock(&pos->lock);
 		pos->ops->get_temp = NULL;
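
The dropped for_each_tz_sibling macro was a re-implementation of list_for_each_entry, so the replacement is a drop-in; the unregister path switches to the _safe variant, which is the standard choice whenever the node being visited may be unlinked or freed during the walk. A generic illustration with made-up names:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
	int id;
};

/* The _safe form keeps a lookahead pointer, so deleting pos is legal. */
static void demo_drain(struct list_head *head)
{
	struct demo_node *pos, *next;

	list_for_each_entry_safe(pos, next, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
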
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 74f5ce0..65dc2df 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -34,6 +34,9 @@
 
 #include "../thermal_core.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/lmh.h>
+
 #define LIMITS_DCVSH                0x10
 #define LIMITS_PROFILE_CHANGE       0x01
 #define LIMITS_NODE_DCVS            0x44435653
@@ -57,12 +60,13 @@
 #define LIMITS_DOMAIN_MIN           0x444D494E
 
 #define LIMITS_TEMP_DEFAULT         75000
+#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
 #define LIMITS_LOW_THRESHOLD_OFFSET 500
 #define LIMITS_POLLING_DELAY_MS     10
-#define LIMITS_CLUSTER_0_REQ        0x179C1B04
-#define LIMITS_CLUSTER_1_REQ        0x179C3B04
-#define LIMITS_CLUSTER_0_INT_CLR    0x179CE808
-#define LIMITS_CLUSTER_1_INT_CLR    0x179CC808
+#define LIMITS_CLUSTER_0_REQ        0x17D43704
+#define LIMITS_CLUSTER_1_REQ        0x17D45F04
+#define LIMITS_CLUSTER_0_INT_CLR    0x17D78808
+#define LIMITS_CLUSTER_1_INT_CLR    0x17D70808
 #define LIMITS_CLUSTER_0_MIN_FREQ   0x17D78BC0
 #define LIMITS_CLUSTER_1_MIN_FREQ   0x17D70BC0
 #define dcvsh_get_frequency(_val, _max) do { \
@@ -104,6 +108,7 @@
 };
 
 LIST_HEAD(lmh_dcvs_hw_list);
+DEFINE_MUTEX(lmh_dcvs_list_access);
 
 static int limits_dcvs_get_freq_limits(uint32_t cpu, unsigned long *max_freq,
 					 unsigned long *min_freq)
@@ -145,6 +150,9 @@
 		goto notify_exit;
 	}
 
+	pr_debug("CPU:%d max value read:%lu\n",
+			cpumask_first(&hw->core_map),
+			max_limit);
 	freq_val = FREQ_KHZ_TO_HZ(max_limit);
 	rcu_read_lock();
 	opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
@@ -164,6 +172,9 @@
 	max_limit = FREQ_HZ_TO_KHZ(freq_val);
 
 	sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
+	pr_debug("CPU:%d max limit:%lu\n", cpumask_first(&hw->core_map),
+			max_limit);
+	trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
 
 notify_exit:
 	hw->hw_freq_limit = max_limit;
@@ -259,7 +270,7 @@
 	struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
 	int ret = 0;
 
-	if (high < LIMITS_LOW_THRESHOLD_OFFSET || low < 0) {
+	if (high >= LIMITS_TEMP_HIGH_THRESH_MAX || low < 0) {
 		pr_err("Value out of range low:%d high:%d\n",
 				low, high);
 		return -EINVAL;
@@ -298,10 +309,14 @@
 {
 	struct limits_dcvs_hw *hw;
 
+	mutex_lock(&lmh_dcvs_list_access);
 	list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
-		if (cpumask_test_cpu(cpu, &hw->core_map))
+		if (cpumask_test_cpu(cpu, &hw->core_map)) {
+			mutex_unlock(&lmh_dcvs_list_access);
 			return hw;
+		}
 	}
+	mutex_unlock(&lmh_dcvs_list_access);
 
 	return NULL;
 }
@@ -343,6 +358,7 @@
 	ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
 				  LIMITS_DOMAIN_MAX, max_freq);
 	mutex_unlock(&hw->access_lock);
+	lmh_dcvs_notify(hw);
 
 	return ret;
 }
@@ -377,6 +393,42 @@
 	.floor_limit = lmh_set_min_limit,
 };
 
+static int limits_cpu_online(unsigned int online_cpu)
+{
+	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(online_cpu);
+	unsigned int idx = 0, cpu = 0;
+
+	if (!hw)
+		return 0;
+
+	for_each_cpu(cpu, &hw->core_map) {
+		cpumask_t cpu_mask  = { CPU_BITS_NONE };
+
+		if (cpu != online_cpu) {
+			idx++;
+			continue;
+		} else if (hw->cdev_data[idx].cdev) {
+			return 0;
+		}
+		cpumask_set_cpu(cpu, &cpu_mask);
+		hw->cdev_data[idx].max_freq = U32_MAX;
+		hw->cdev_data[idx].min_freq = 0;
+		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
+						&cpu_mask, &cd_ops);
+		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev)) {
+			pr_err("CPU:%u cooling device register error:%ld\n",
+				cpu, PTR_ERR(hw->cdev_data[idx].cdev));
+			hw->cdev_data[idx].cdev = NULL;
+		} else {
+			pr_debug("CPU:%u cooling device registered\n", cpu);
+		}
+		break;
+
+	}
+
+	return 0;
+}
+
 static int limits_dcvs_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -387,7 +439,7 @@
 	struct device_node *cpu_node, *lmh_node;
 	uint32_t request_reg, clear_reg, min_reg;
 	unsigned long max_freq, min_freq;
-	int cpu, idx;
+	int cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	for_each_possible_cpu(cpu) {
@@ -480,22 +532,6 @@
 	if (IS_ERR_OR_NULL(tzdev))
 		return PTR_ERR(tzdev);
 
-	/* Setup cooling devices to request mitigation states */
-	mutex_init(&hw->access_lock);
-	idx = 0;
-	for_each_cpu(cpu, &hw->core_map) {
-		cpumask_t cpu_mask  = { CPU_BITS_NONE };
-
-		cpumask_set_cpu(cpu, &cpu_mask);
-		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
-						&cpu_mask, &cd_ops);
-		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev))
-			return PTR_ERR(hw->cdev_data[idx].cdev);
-		hw->cdev_data[idx].max_freq = U32_MAX;
-		hw->cdev_data[idx].min_freq = 0;
-		idx++;
-	}
-
 	switch (affinity) {
 	case 0:
 		request_reg = LIMITS_CLUSTER_0_REQ;
@@ -508,33 +544,36 @@
 		min_reg = LIMITS_CLUSTER_1_MIN_FREQ;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unregister_sensor;
 	};
 
+	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+	if (!hw->min_freq_reg) {
+		pr_err("min frequency enable register remap failed\n");
+		ret = -ENOMEM;
+		goto unregister_sensor;
+	}
+
+	mutex_init(&hw->access_lock);
+	init_timer_deferrable(&hw->poll_timer);
+	hw->poll_timer.data = (unsigned long)hw;
+	hw->poll_timer.function = limits_dcvs_poll;
 	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
 	if (!hw->osm_hw_reg) {
 		pr_err("register remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
 	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
 	if (!hw->int_clr_reg) {
 		pr_err("interrupt clear reg remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
-	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
-	if (!hw->min_freq_reg) {
-		pr_err("min frequency enable register remap failed\n");
-		return -ENOMEM;
-	}
-	init_timer_deferrable(&hw->poll_timer);
-	hw->poll_timer.data = (unsigned long)hw;
-	hw->poll_timer.function = limits_dcvs_poll;
 
 	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
 	if (hw->irq_num < 0) {
-		ret = hw->irq_num;
-		pr_err("Error getting IRQ number. err:%d\n", ret);
-		return ret;
+		pr_err("Error getting IRQ number. err:%d\n", hw->irq_num);
+		goto probe_exit;
 	}
 	atomic_set(&hw->is_irq_enabled, 1);
 	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
@@ -542,11 +581,26 @@
 		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
 	if (ret) {
 		pr_err("Error registering for irq. err:%d\n", ret);
-		return ret;
+		ret = 0;
+		goto probe_exit;
 	}
 
+probe_exit:
+	mutex_lock(&lmh_dcvs_list_access);
 	INIT_LIST_HEAD(&hw->list);
 	list_add(&hw->list, &lmh_dcvs_hw_list);
+	mutex_unlock(&lmh_dcvs_list_access);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
+				limits_cpu_online, NULL);
+	if (ret < 0)
+		goto unregister_sensor;
+	ret = 0;
+
+	return ret;
+
+unregister_sensor:
+	thermal_zone_of_sensor_unregister(&pdev->dev, tzdev);
 
 	return ret;
 }
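
The lmh_dcvs probe path above now defers cooling-device setup to a CPU hotplug callback registered through cpuhp_setup_state(). With CPUHP_AP_ONLINE_DYN the callback also runs for every CPU that is already online at registration time, which is what allows dropping the old for_each_cpu() registration loop. The general shape of that registration, with a hypothetical callback:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int demo_cpu_online(unsigned int cpu)
{
	/*
	 * Called for each CPU already online when the state is set up and
	 * for every CPU that comes online afterwards.
	 */
	pr_debug("demo: cpu%u online\n", cpu);
	return 0;
}

static int demo_register_hotplug(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/cdev:online",
				  demo_cpu_online, NULL);
	/* Returns a dynamic state id (> 0) on success, -errno on failure. */
	return state < 0 ? state : 0;
}
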
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 8d706cd..342160e 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -34,6 +34,7 @@
 #include <linux/qpnp/qpnp-adc.h>
 #include <linux/thermal.h>
 #include <linux/platform_device.h>
+#include "thermal_core.h"
 
 /* QPNP VADC TM register definition */
 #define QPNP_REVISION3					0x2
@@ -41,126 +42,15 @@
 #define QPNP_PERPH_TYPE2				0x2
 #define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT		2
 #define QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT		0x22
-#define QPNP_STATUS1					0x8
-#define QPNP_STATUS1_OP_MODE				4
-#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS		BIT(2)
-#define QPNP_STATUS1_REQ_STS				BIT(1)
-#define QPNP_STATUS1_EOC				BIT(0)
-#define QPNP_STATUS2					0x9
-#define QPNP_STATUS2_CONV_SEQ_STATE			6
-#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG		BIT(1)
-#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS		BIT(0)
-#define QPNP_CONV_TIMEOUT_ERR				2
-
-#define QPNP_MODE_CTL					0x40
-#define QPNP_OP_MODE_SHIFT				3
-#define QPNP_VREF_XO_THM_FORCE				BIT(2)
-#define QPNP_AMUX_TRIM_EN				BIT(1)
-#define QPNP_ADC_TRIM_EN				BIT(0)
 #define QPNP_EN_CTL1					0x46
 #define QPNP_ADC_TM_EN					BIT(7)
 #define QPNP_BTM_CONV_REQ				0x47
 #define QPNP_ADC_CONV_REQ_EN				BIT(7)
 
-#define QPNP_ADC_CH_SEL_CTL				0x48
-#define QPNP_ADC_DIG_PARAM				0x50
-#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT		3
-#define QPNP_HW_SETTLE_DELAY				0x51
+#define QPNP_OP_MODE_SHIFT				3
 #define QPNP_CONV_REQ					0x52
 #define QPNP_CONV_REQ_SET				BIT(7)
-#define QPNP_CONV_SEQ_CTL				0x54
-#define QPNP_CONV_SEQ_HOLDOFF_SHIFT			4
-#define QPNP_CONV_SEQ_TRIG_CTL				0x55
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL			0x57
-#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2			0x58
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
-#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
 
-#define QPNP_ADC_MEAS_INTERVAL_OP_CTL			0x59
-#define QPNP_ADC_MEAS_INTERVAL_OP			BIT(7)
-
-#define QPNP_FAST_AVG_CTL				0x5a
-#define QPNP_FAST_AVG_EN				0x5b
-#define QPNP_FAST_AVG_ENABLED				BIT(7)
-
-#define QPNP_M0_LOW_THR_LSB				0x5c
-#define QPNP_M0_LOW_THR_MSB				0x5d
-#define QPNP_M0_HIGH_THR_LSB				0x5e
-#define QPNP_M0_HIGH_THR_MSB				0x5f
-#define QPNP_M1_ADC_CH_SEL_CTL				0x68
-#define QPNP_M1_LOW_THR_LSB				0x69
-#define QPNP_M1_LOW_THR_MSB				0x6a
-#define QPNP_M1_HIGH_THR_LSB				0x6b
-#define QPNP_M1_HIGH_THR_MSB				0x6c
-#define QPNP_M2_ADC_CH_SEL_CTL				0x70
-#define QPNP_M2_LOW_THR_LSB				0x71
-#define QPNP_M2_LOW_THR_MSB				0x72
-#define QPNP_M2_HIGH_THR_LSB				0x73
-#define QPNP_M2_HIGH_THR_MSB				0x74
-#define QPNP_M3_ADC_CH_SEL_CTL				0x78
-#define QPNP_M3_LOW_THR_LSB				0x79
-#define QPNP_M3_LOW_THR_MSB				0x7a
-#define QPNP_M3_HIGH_THR_LSB				0x7b
-#define QPNP_M3_HIGH_THR_MSB				0x7c
-#define QPNP_M4_ADC_CH_SEL_CTL				0x80
-#define QPNP_M4_LOW_THR_LSB				0x81
-#define QPNP_M4_LOW_THR_MSB				0x82
-#define QPNP_M4_HIGH_THR_LSB				0x83
-#define QPNP_M4_HIGH_THR_MSB				0x84
-#define QPNP_M5_ADC_CH_SEL_CTL				0x88
-#define QPNP_M5_LOW_THR_LSB				0x89
-#define QPNP_M5_LOW_THR_MSB				0x8a
-#define QPNP_M5_HIGH_THR_LSB				0x8b
-#define QPNP_M5_HIGH_THR_MSB				0x8c
-#define QPNP_M6_ADC_CH_SEL_CTL				0x90
-#define QPNP_M6_LOW_THR_LSB				0x91
-#define QPNP_M6_LOW_THR_MSB				0x92
-#define QPNP_M6_HIGH_THR_LSB				0x93
-#define QPNP_M6_HIGH_THR_MSB				0x94
-#define QPNP_M7_ADC_CH_SEL_CTL				0x98
-#define QPNP_M7_LOW_THR_LSB				0x99
-#define QPNP_M7_LOW_THR_MSB				0x9a
-#define QPNP_M7_HIGH_THR_LSB				0x9b
-#define QPNP_M7_HIGH_THR_MSB				0x9c
-
-#define QPNP_ADC_TM_MULTI_MEAS_EN			0x41
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M0			BIT(0)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M1			BIT(1)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M2			BIT(2)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M3			BIT(3)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M4			BIT(4)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M5			BIT(5)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M6			BIT(6)
-#define QPNP_ADC_TM_MULTI_MEAS_EN_M7			BIT(7)
-#define QPNP_ADC_TM_LOW_THR_INT_EN			0x42
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M0			BIT(0)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M1			BIT(1)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M2			BIT(2)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M3			BIT(3)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M4			BIT(4)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M5			BIT(5)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M6			BIT(6)
-#define QPNP_ADC_TM_LOW_THR_INT_EN_M7			BIT(7)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN			0x43
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M0			BIT(0)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M1			BIT(1)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M2			BIT(2)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M3			BIT(3)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M4			BIT(4)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M5			BIT(5)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M6			BIT(6)
-#define QPNP_ADC_TM_HIGH_THR_INT_EN_M7			BIT(7)
-
-#define QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL			0x59
-#define QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL			0x6d
-#define QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL			0x75
-#define QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL			0x7d
-#define QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL			0x85
-#define QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL			0x8d
-#define QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL			0x95
-#define QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL			0x9d
 #define QPNP_ADC_TM_STATUS1				0x8
 #define QPNP_ADC_TM_STATUS_LOW				0xa
 #define QPNP_ADC_TM_STATUS_HIGH				0xb
@@ -172,22 +62,22 @@
 #define QPNP_ADC_TM_THR_LSB_MASK(val)			(val & 0xff)
 #define QPNP_ADC_TM_THR_MSB_MASK(val)			((val & 0xff00) >> 8)
 
-#define QPNP_MIN_TIME			2000
-#define QPNP_MAX_TIME			2100
-#define QPNP_RETRY			1000
-
 /* QPNP ADC TM HC start */
-#define QPNP_BTM_HC_STATUS1		0x08
-#define QPNP_BTM_HC_STATUS_LOW		0x0a
-#define QPNP_BTM_HC_STATUS_HIGH		0x0b
+#define QPNP_BTM_HC_STATUS1				0x08
+#define QPNP_BTM_HC_STATUS_LOW				0x0a
+#define QPNP_BTM_HC_STATUS_HIGH				0x0b
 
-#define QPNP_BTM_HC_ADC_DIG_PARAM	0x42
-#define QPNP_BTM_HC_FAST_AVG_CTL	0x43
-#define QPNP_BTM_EN_CTL1		0x46
-#define QPNP_BTM_CONV_REQ		0x47
+#define QPNP_BTM_HC_ADC_DIG_PARAM			0x42
+#define QPNP_BTM_HC_FAST_AVG_CTL			0x43
+#define QPNP_BTM_EN_CTL1				0x46
+#define QPNP_BTM_CONV_REQ				0x47
 
-#define QPNP_BTM_MEAS_INTERVAL_CTL	0x50
-#define QPNP_BTM_MEAS_INTERVAL_CTL2	0x51
+#define QPNP_BTM_MEAS_INTERVAL_CTL			0x50
+#define QPNP_BTM_MEAS_INTERVAL_CTL2			0x51
+#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT		0x3
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT		0x4
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK		0xf0
+#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK		0xf
 
 #define QPNP_BTM_Mn_ADC_CH_SEL_CTL(n)		((n * 8) + 0x60)
 #define QPNP_BTM_Mn_LOW_THR0(n)			((n * 8) + 0x61)
@@ -208,6 +98,7 @@
 
 #define QPNP_BTM_Mn_DATA0(n)			((n * 2) + 0xa0)
 #define QPNP_BTM_Mn_DATA1(n)			((n * 2) + 0xa1)
+#define QPNP_BTM_CHANNELS			8
 
 /* QPNP ADC TM HC end */
 
@@ -277,69 +168,6 @@
 
 LIST_HEAD(qpnp_adc_tm_device_list);
 
-struct qpnp_adc_tm_trip_reg_type {
-	enum qpnp_adc_tm_channel_select	btm_amux_chan;
-	uint16_t			low_thr_lsb_addr;
-	uint16_t			low_thr_msb_addr;
-	uint16_t			high_thr_lsb_addr;
-	uint16_t			high_thr_msb_addr;
-	u8				multi_meas_en;
-	u8				low_thr_int_chan_en;
-	u8				high_thr_int_chan_en;
-	u8				meas_interval_ctl;
-};
-
-static struct qpnp_adc_tm_trip_reg_type adc_tm_data[] = {
-	[QPNP_ADC_TM_CHAN0] = {QPNP_ADC_TM_M0_ADC_CH_SEL_CTL,
-		QPNP_M0_LOW_THR_LSB,
-		QPNP_M0_LOW_THR_MSB, QPNP_M0_HIGH_THR_LSB,
-		QPNP_M0_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M0,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M0, QPNP_ADC_TM_HIGH_THR_INT_EN_M0,
-		QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN1] = {QPNP_ADC_TM_M1_ADC_CH_SEL_CTL,
-		QPNP_M1_LOW_THR_LSB,
-		QPNP_M1_LOW_THR_MSB, QPNP_M1_HIGH_THR_LSB,
-		QPNP_M1_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M1,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M1, QPNP_ADC_TM_HIGH_THR_INT_EN_M1,
-		QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN2] = {QPNP_ADC_TM_M2_ADC_CH_SEL_CTL,
-		QPNP_M2_LOW_THR_LSB,
-		QPNP_M2_LOW_THR_MSB, QPNP_M2_HIGH_THR_LSB,
-		QPNP_M2_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M2,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M2, QPNP_ADC_TM_HIGH_THR_INT_EN_M2,
-		QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN3] = {QPNP_ADC_TM_M3_ADC_CH_SEL_CTL,
-		QPNP_M3_LOW_THR_LSB,
-		QPNP_M3_LOW_THR_MSB, QPNP_M3_HIGH_THR_LSB,
-		QPNP_M3_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M3,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M3, QPNP_ADC_TM_HIGH_THR_INT_EN_M3,
-		QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN4] = {QPNP_ADC_TM_M4_ADC_CH_SEL_CTL,
-		QPNP_M4_LOW_THR_LSB,
-		QPNP_M4_LOW_THR_MSB, QPNP_M4_HIGH_THR_LSB,
-		QPNP_M4_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M4,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M4, QPNP_ADC_TM_HIGH_THR_INT_EN_M4,
-		QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN5] = {QPNP_ADC_TM_M5_ADC_CH_SEL_CTL,
-		QPNP_M5_LOW_THR_LSB,
-		QPNP_M5_LOW_THR_MSB, QPNP_M5_HIGH_THR_LSB,
-		QPNP_M5_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M5,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M5, QPNP_ADC_TM_HIGH_THR_INT_EN_M5,
-		QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN6] = {QPNP_ADC_TM_M6_ADC_CH_SEL_CTL,
-		QPNP_M6_LOW_THR_LSB,
-		QPNP_M6_LOW_THR_MSB, QPNP_M6_HIGH_THR_LSB,
-		QPNP_M6_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M6,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M6, QPNP_ADC_TM_HIGH_THR_INT_EN_M6,
-		QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL},
-	[QPNP_ADC_TM_CHAN7] = {QPNP_ADC_TM_M7_ADC_CH_SEL_CTL,
-		QPNP_M7_LOW_THR_LSB,
-		QPNP_M7_LOW_THR_MSB, QPNP_M7_HIGH_THR_LSB,
-		QPNP_M7_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M7,
-		QPNP_ADC_TM_LOW_THR_INT_EN_M7, QPNP_ADC_TM_HIGH_THR_INT_EN_M7,
-		QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL},
-};
-
 static struct qpnp_adc_tm_reverse_scale_fn adc_tm_rscale_fn[] = {
 	[SCALE_R_VBATT] = {qpnp_adc_vbatt_rscaler},
 	[SCALE_RBATT_THERM] = {qpnp_adc_btm_scaler},
@@ -380,33 +208,6 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_fast_avg_en(struct qpnp_adc_tm_chip *chip,
-				uint32_t *fast_avg_sample)
-{
-	int rc = 0, version = 0;
-	u8 fast_avg_en = 0;
-
-	version = qpnp_adc_get_revid_version(chip->dev);
-	if (!((version == QPNP_REV_ID_8916_1_0) ||
-		(version == QPNP_REV_ID_8916_1_1) ||
-		(version == QPNP_REV_ID_8916_2_0))) {
-		pr_debug("fast-avg-en not required for this version\n");
-		return rc;
-	}
-
-	fast_avg_en = QPNP_FAST_AVG_ENABLED;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_EN, fast_avg_en, 1);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg enable err\n");
-		return rc;
-	}
-
-	if (*fast_avg_sample >= 3)
-		*fast_avg_sample = 2;
-
-	return rc;
-}
-
 static int qpnp_adc_tm_check_vreg_vote(struct qpnp_adc_tm_chip *chip)
 {
 	int rc = 0;
@@ -443,13 +244,11 @@
 		return rc;
 	}
 
-	if (chip->adc_tm_hc) {
-		data = QPNP_ADC_CONV_REQ_EN;
-		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-		if (rc < 0) {
-			pr_err("adc-tm enable failed\n");
-			return rc;
-		}
+	data = QPNP_ADC_CONV_REQ_EN;
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
 	}
 
 	return rc;
@@ -460,12 +259,10 @@
 	u8 data = 0;
 	int rc = 0;
 
-	if (chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-		if (rc < 0) {
-			pr_err("adc-tm enable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
+	if (rc < 0) {
+		pr_err("adc-tm enable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
@@ -538,132 +335,11 @@
 static int32_t qpnp_adc_tm_enable_if_channel_meas(
 					struct qpnp_adc_tm_chip *chip)
 {
-	u8 adc_tm_meas_en = 0, status_low = 0, status_high = 0;
 	int rc = 0;
 
-	if (chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_rc_check_channel_en(chip);
-		if (rc) {
-			pr_err("adc_tm channel check failed\n");
-			return rc;
-		}
-	} else {
-		/* Check if a measurement request is still required */
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							&adc_tm_meas_en, 1);
-		if (rc) {
-			pr_err("read status high failed with %d\n", rc);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							&status_low, 1);
-		if (rc) {
-			pr_err("read status low failed with %d\n", rc);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							&status_high, 1);
-		if (rc) {
-			pr_err("read status high failed with %d\n", rc);
-			return rc;
-		}
-
-		/* Enable only if there are pending measurement requests */
-		if ((adc_tm_meas_en && status_high) ||
-				(adc_tm_meas_en && status_low)) {
-			qpnp_adc_tm_enable(chip);
-
-			/* Request conversion */
-			rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ,
-							QPNP_CONV_REQ_SET, 1);
-			if (rc < 0) {
-				pr_err("adc-tm request conversion failed\n");
-				return rc;
-			}
-		} else {
-			/* disable the vote if applicable */
-			if (chip->adc_vote_enable && chip->adc->hkadc_ldo &&
-					chip->adc->hkadc_ldo_ok) {
-				qpnp_adc_disable_voltage(chip->adc);
-				chip->adc_vote_enable = false;
-			}
-		}
-	}
-
-	return rc;
-}
-
-static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip,
-								u8 mode_ctl)
-{
-	int rc;
-
-	mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN);
-
-	/* VADC_BTM current sets mode to recurring measurements */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl, 1);
-	if (rc < 0)
-		pr_err("adc-tm write mode selection err\n");
-
-	return rc;
-}
-
-static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip)
-{
-	u8 status1 = 0, mode_ctl = 0;
-	int rc, count = 0;
-
-	/* Re-enable the peripheral */
-	rc = qpnp_adc_tm_enable(chip);
+	rc = qpnp_adc_tm_rc_check_channel_en(chip);
 	if (rc) {
-		pr_err("adc-tm re-enable peripheral failed\n");
-		return rc;
-	}
-
-	/* The VADC_TM bank needs to be disabled for new conversion request */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return rc;
-	}
-
-	/* Disable the bank if a conversion is occurring */
-	while (status1 & QPNP_STATUS1_REQ_STS) {
-		if (count > QPNP_RETRY) {
-			pr_err("retry error=%d with 0x%x\n", count, status1);
-			break;
-		}
-		/*
-		 * Wait time is based on the optimum sampling rate
-		 * and adding enough time buffer to account for ADC conversions
-		 * occurring on different peripheral banks
-		 */
-		usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
-		rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1,
-							&status1, 1);
-		if (rc < 0) {
-			pr_err("adc-tm disable failed\n");
-			return rc;
-		}
-		count++;
-	}
-
-	if (!chip->adc_tm_hc) {
-		/* Change the mode back to recurring measurement mode */
-		mode_ctl = ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm mode change to recurring failed\n");
-			return rc;
-		}
-	}
-
-	/* Disable the peripheral */
-	rc = qpnp_adc_tm_disable(chip);
-	if (rc < 0) {
-		pr_err("adc-tm peripheral disable failed\n");
+		pr_err("adc_tm channel check failed\n");
 		return rc;
 	}
 
@@ -676,20 +352,11 @@
 	int rc = 0, i;
 	bool chan_found = false;
 
-	if (!chip->adc_tm_hc) {
-		for (i = 0; i < QPNP_ADC_TM_CHAN_NONE; i++) {
-			if (adc_tm_data[i].btm_amux_chan == btm_chan) {
-				*btm_chan_idx = i;
-				chan_found = true;
-			}
-		}
-	} else {
-		for (i = 0; i < chip->max_channels_available; i++) {
-			if (chip->sensor[i].btm_channel_num == btm_chan) {
-				*btm_chan_idx = i;
-				chan_found = true;
-				break;
-			}
+	for (i = 0; i < chip->max_channels_available; i++) {
+		if (chip->sensor[i].btm_channel_num == btm_chan) {
+			*btm_chan_idx = i;
+			chan_found = true;
+			break;
 		}
 	}
 
@@ -760,12 +427,7 @@
 
 	switch (chip->sensor[chan_idx].timer_select) {
 	case ADC_MEAS_TIMER_SELECT1:
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL,
-				chip->sensor[chan_idx].meas_interval, 1);
-		else
-			rc = qpnp_adc_tm_write_reg(chip,
+		rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL,
 				chip->sensor[chan_idx].meas_interval, 1);
 		if (rc < 0) {
@@ -775,12 +437,7 @@
 	break;
 	case ADC_MEAS_TIMER_SELECT2:
 		/* Thermal channels uses timer2, default to 1 second */
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_read_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				&meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_read_reg(chip,
+		rc = qpnp_adc_tm_read_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -791,12 +448,7 @@
 		timer_interval_store <<= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT;
 		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK;
 		meas_interval_timer2 |= timer_interval_store;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_write_reg(chip,
+		rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -805,12 +457,7 @@
 		}
 	break;
 	case ADC_MEAS_TIMER_SELECT3:
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_read_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				&meas_interval_timer2, 1);
-		else
-			rc = qpnp_adc_tm_read_reg(chip,
+		rc = qpnp_adc_tm_read_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				&meas_interval_timer2, 1);
 		if (rc < 0) {
@@ -820,11 +467,6 @@
 		timer_interval_store = chip->sensor[chan_idx].meas_interval;
 		timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK;
 		meas_interval_timer2 |= timer_interval_store;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_ADC_TM_MEAS_INTERVAL_CTL2,
-				meas_interval_timer2, 1);
-		else
 			rc = qpnp_adc_tm_write_reg(chip,
 				QPNP_BTM_MEAS_INTERVAL_CTL2,
 				meas_interval_timer2, 1);
@@ -844,14 +486,9 @@
 		pr_err("Invalid btm channel idx\n");
 		return rc;
 	}
-	if (!chip->adc_tm_hc)
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].meas_interval_ctl,
-				chip->sensor[chan_idx].timer_select, 1);
-	else
-		rc = qpnp_adc_tm_write_reg(chip,
-				QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
-				chip->sensor[chan_idx].timer_select, 1);
+	rc = qpnp_adc_tm_write_reg(chip,
+			QPNP_BTM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx),
+			chip->sensor[chan_idx].timer_select, 1);
 	if (rc < 0) {
 		pr_err("TM channel timer configure failed\n");
 		return rc;
@@ -934,67 +571,6 @@
 	return rc;
 }
 
-static int32_t qpnp_adc_tm_read_thr_value(struct qpnp_adc_tm_chip *chip,
-			uint32_t btm_chan)
-{
-	int rc = 0;
-	u8 data_lsb = 0, data_msb = 0;
-	uint32_t btm_chan_idx = 0;
-	int32_t low_thr = 0, high_thr = 0;
-
-	if (!chip->adc_tm_hc) {
-		pr_err("Not applicable for VADC HC peripheral\n");
-		return -EINVAL;
-	}
-
-	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
-	if (rc < 0) {
-		pr_err("Invalid btm channel idx\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
-			&data_lsb, 1);
-	if (rc < 0) {
-		pr_err("low threshold lsb setting failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].low_thr_msb_addr,
-		&data_msb, 1);
-	if (rc < 0) {
-		pr_err("low threshold msb setting failed\n");
-		return rc;
-	}
-
-	low_thr = (data_msb << 8) | data_lsb;
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
-		&data_lsb, 1);
-	if (rc < 0) {
-		pr_err("high threshold lsb setting failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip,
-		adc_tm_data[btm_chan_idx].high_thr_msb_addr,
-		&data_msb, 1);
-	if (rc < 0) {
-		pr_err("high threshold msb setting failed\n");
-		return rc;
-	}
-
-	high_thr = (data_msb << 8) | data_lsb;
-
-	pr_debug("configured thresholds high:0x%x and low:0x%x\n",
-		high_thr, low_thr);
-
-	return rc;
-}
-
 static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip,
 			uint32_t btm_chan, int32_t high_thr, int32_t low_thr)
 {
@@ -1007,69 +583,36 @@
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_lsb_addr,
-			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].low_thr_msb_addr,
-			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold msb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].high_thr_lsb_addr,
-			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
-		if (rc < 0) {
-			pr_err("high threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			adc_tm_data[btm_chan_idx].high_thr_msb_addr,
-			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
-		if (rc < 0)
-			pr_err("high threshold msb setting failed\n");
-	} else {
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
-			QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
-			QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
-		if (rc < 0) {
-			pr_err("low threshold msb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
-			QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
-		if (rc < 0) {
-			pr_err("high threshold lsb setting failed\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_write_reg(chip,
-			QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
-			QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
-		if (rc < 0)
-			pr_err("high threshold msb setting failed\n");
-
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_LOW_THR0(btm_chan_idx),
+		QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1);
+	if (rc < 0) {
+		pr_err("low threshold lsb setting failed\n");
+		return rc;
 	}
 
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_LOW_THR1(btm_chan_idx),
+		QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1);
+	if (rc < 0) {
+		pr_err("low threshold msb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx),
+		QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1);
+	if (rc < 0) {
+		pr_err("high threshold lsb setting failed\n");
+		return rc;
+	}
+
+	rc = qpnp_adc_tm_write_reg(chip,
+		QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx),
+		QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1);
+	if (rc < 0)
+		pr_err("high threshold msb setting failed\n");
+
 	pr_debug("client requested high:%d and low:%d\n",
 		high_thr, low_thr);
 
@@ -1206,14 +749,9 @@
 			pr_debug("low sensor mask:%x with state:%d\n",
 					sensor_mask, chan_prop->state_request);
 			/* Enable low threshold's interrupt */
-			if (!chip->adc_tm_hc)
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_ADC_TM_LOW_THR_INT_EN,
-					sensor_mask, true);
-			else
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_BTM_Mn_EN(btm_chan_idx),
-					QPNP_BTM_Mn_LOW_THR_INT_EN, true);
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, true);
 			if (rc < 0) {
 				pr_err("low thr enable err:%d\n", btm_chan);
 				return rc;
@@ -1223,14 +761,9 @@
 		if (high_thr_set) {
 			/* Enable high threshold's interrupt */
 			pr_debug("high sensor mask:%x\n", sensor_mask);
-			if (!chip->adc_tm_hc)
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_ADC_TM_HIGH_THR_INT_EN,
-					sensor_mask, true);
-			else
-				rc = qpnp_adc_tm_reg_update(chip,
-					QPNP_BTM_Mn_EN(btm_chan_idx),
-					QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
+			rc = qpnp_adc_tm_reg_update(chip,
+				QPNP_BTM_Mn_EN(btm_chan_idx),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, true);
 			if (rc < 0) {
 				pr_err("high thr enable err:%d\n", btm_chan);
 				return rc;
@@ -1239,11 +772,7 @@
 	}
 
 	/* Enable corresponding BTM channel measurement */
-	if (!chip->adc_tm_hc)
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true);
-	else
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_idx),
 			QPNP_BTM_Mn_MEAS_EN, true);
 	if (rc < 0) {
 		pr_err("multi measurement en failed\n");
@@ -1358,135 +887,12 @@
 	return 0;
 }
 
-static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip,
-			struct qpnp_adc_amux_properties *chan_prop)
-{
-	u8 decimation = 0, op_cntrl = 0, mode_ctl = 0;
-	int rc = 0;
-	uint32_t btm_chan = 0;
-
-	/* Set measurement in single measurement mode */
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-	if (rc < 0) {
-		pr_err("adc-tm single mode select failed\n");
-		return rc;
-	}
-
-	/* Disable bank */
-	rc = qpnp_adc_tm_disable(chip);
-	if (rc)
-		return rc;
-
-	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check(chip);
-	if (rc < 0) {
-		pr_err("adc-tm req_sts check failed\n");
-		return rc;
-	}
-
-	/* Configure AMUX channel select for the corresponding BTM channel*/
-	btm_chan = chan_prop->chan_prop->tm_channel_select;
-	rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel, 1);
-	if (rc < 0) {
-		pr_err("adc-tm channel selection err\n");
-		return rc;
-	}
-
-	/* Digital parameter setup */
-	decimation |= chan_prop->decimation <<
-				QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation, 1);
-	if (rc < 0) {
-		pr_err("adc-tm digital parameter setup err\n");
-		return rc;
-	}
-
-	/* Hardware setting time */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY,
-					chan_prop->hw_settle_time, 1);
-	if (rc < 0) {
-		pr_err("adc-tm hw settling time setup err\n");
-		return rc;
-	}
-
-	/* Fast averaging setup/enable */
-	rc = qpnp_adc_tm_fast_avg_en(chip, &chan_prop->fast_avg_setup);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg enable err\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL,
-				chan_prop->fast_avg_setup, 1);
-	if (rc < 0) {
-		pr_err("adc-tm fast-avg setup err\n");
-		return rc;
-	}
-
-	/* Measurement interval setup */
-	rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan,
-						chan_prop->chan_prop);
-	if (rc < 0) {
-		pr_err("adc-tm timer select failed\n");
-		return rc;
-	}
-
-	/* Channel configuration setup */
-	rc = qpnp_adc_tm_channel_configure(chip, btm_chan,
-			chan_prop->chan_prop, chan_prop->amux_channel);
-	if (rc < 0) {
-		pr_err("adc-tm channel configure failed\n");
-		return rc;
-	}
-
-	/* Recurring interval measurement enable */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
-							&op_cntrl, 1);
-	op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP;
-	rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL,
-			op_cntrl, true);
-	if (rc < 0) {
-		pr_err("adc-tm meas interval op configure failed\n");
-		return rc;
-	}
-
-	/* Enable bank */
-	rc = qpnp_adc_tm_enable(chip);
-	if (rc)
-		return rc;
-
-	/* Request conversion */
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1);
-	if (rc < 0) {
-		pr_err("adc-tm request conversion failed\n");
-		return rc;
-	}
-
-	return 0;
-}
-
-static int qpnp_adc_tm_get_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode *mode)
-{
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-
-	if ((IS_ERR(adc_tm)) || qpnp_adc_tm_check_revision(
-			adc_tm->chip, adc_tm->btm_channel_num))
-		return -EINVAL;
-
-	*mode = adc_tm->mode;
-
-	return 0;
-}
-
-static int qpnp_adc_tm_set_mode(struct thermal_zone_device *thermal,
+static int qpnp_adc_tm_set_mode(struct qpnp_adc_tm_sensor *adc_tm,
 			      enum thermal_device_mode mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	int rc = 0, channel;
-	u8 sensor_mask = 0, mode_ctl = 0;
+	u8 sensor_mask = 0;
 	uint32_t btm_chan_idx = 0, btm_chan = 0;
 
 	if (qpnp_adc_tm_is_valid(chip)) {
@@ -1525,32 +931,14 @@
 		chip->adc->amux_prop->calib_type =
 			chip->adc->adc_channels[channel].calib_type;
 
-		if (!chip->adc_tm_hc) {
-			rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-			if (rc) {
-				pr_err("adc-tm configure failed with %d\n", rc);
-				goto fail;
-			}
-		} else {
-			rc = qpnp_adc_tm_hc_configure(chip,
-							chip->adc->amux_prop);
-			if (rc) {
-				pr_err("hc configure failed with %d\n", rc);
-				goto fail;
-			}
+		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+		if (rc) {
+			pr_err("hc configure failed with %d\n", rc);
+			goto fail;
 		}
 	} else if (mode == THERMAL_DEVICE_DISABLED) {
 		sensor_mask = 1 << adc_tm->sensor_num;
 
-		if (!chip->adc_tm_hc) {
-			mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-			rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-			if (rc < 0) {
-				pr_err("adc-tm single mode select failed\n");
-				goto fail;
-			}
-		}
-
 		/* Disable bank */
 		rc = qpnp_adc_tm_disable(chip);
 		if (rc < 0) {
@@ -1558,28 +946,12 @@
 			goto fail;
 		}
 
-		if (!chip->adc_tm_hc) {
-			/* Check if a conversion is in progress */
-			rc = qpnp_adc_tm_req_sts_check(chip);
-			if (rc < 0) {
-				pr_err("adc-tm req_sts check failed\n");
-				goto fail;
-			}
-
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, false);
-			if (rc < 0) {
-				pr_err("multi measurement update failed\n");
-				goto fail;
-			}
-		} else {
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_MEAS_EN, false);
-			if (rc < 0) {
-				pr_err("multi measurement disable failed\n");
-				goto fail;
-			}
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0) {
+			pr_err("multi measurement disable failed\n");
+			goto fail;
 		}
 
 		rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -1597,11 +969,13 @@
 	return 0;
 }
 
-static int qpnp_adc_tm_get_trip_type(struct thermal_zone_device *thermal,
-				   int trip, enum thermal_trip_type *type)
+static int qpnp_adc_tm_activate_trip_type(struct qpnp_adc_tm_sensor *adc_tm,
+			int trip, enum thermal_trip_activation_mode mode)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
+	int rc = 0, sensor_mask = 0;
+	bool state = false;
+	uint32_t btm_chan_idx = 0, btm_chan = 0;
 
 	if (qpnp_adc_tm_is_valid(chip))
 		return -ENODEV;
@@ -1609,110 +983,48 @@
 	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
 		return -EINVAL;
 
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		*type = THERMAL_TRIP_CONFIGURABLE_HI;
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
-		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
-	break;
-	default:
-		return -EINVAL;
-	}
+	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
+		state = true;
 
-	return 0;
-}
+	sensor_mask = 1 << adc_tm->sensor_num;
 
-static int qpnp_adc_tm_get_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int *temp)
-{
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
-	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
-	int64_t result = 0;
-	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
-	unsigned int reg, rc = 0;
-	uint16_t reg_low_thr_lsb, reg_low_thr_msb;
-	uint16_t reg_high_thr_lsb, reg_high_thr_msb;
-	uint32_t btm_chan_idx = 0, btm_chan = 0;
+	pr_debug("Sensor number:%x with state:%d\n",
+					adc_tm->sensor_num, state);
 
-	if (qpnp_adc_tm_is_valid(chip))
-		return -ENODEV;
-
-	if (qpnp_adc_tm_check_revision(chip, adc_tm_sensor->btm_channel_num))
-		return -EINVAL;
-
-	btm_chan = adc_tm_sensor->btm_channel_num;
+	btm_chan = adc_tm->btm_channel_num;
 	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
 	if (rc < 0) {
 		pr_err("Invalid btm channel idx\n");
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
-		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
-		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
-		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
-	} else {
-		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
-		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
-		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
-		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
-	}
-
 	switch (trip) {
 	case ADC_TM_TRIP_HIGH_WARM:
-		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_lsb,
-						&trip_warm_thr0, 1);
-		if (rc) {
-			pr_err("adc-tm low_thr_lsb err\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, reg_low_thr_msb,
-						&trip_warm_thr1, 1);
-		if (rc) {
-			pr_err("adc-tm low_thr_msb err\n");
-			return rc;
-		}
-	reg = (trip_warm_thr1 << 8) | trip_warm_thr0;
+		/* low_thr (lower voltage) for higher temp */
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_LOW_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
 	break;
 	case ADC_TM_TRIP_LOW_COOL:
-		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_lsb,
-						&trip_cool_thr0, 1);
-		if (rc) {
-			pr_err("adc-tm_tm high_thr_lsb err\n");
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_read_reg(chip, reg_high_thr_msb,
-						&trip_cool_thr1, 1);
-		if (rc) {
-			pr_err("adc-tm_tm high_thr_lsb err\n");
-			return rc;
-		}
-	reg = (trip_cool_thr1 << 8) | trip_cool_thr0;
+		/* high_thr (higher voltage) for cooler temp */
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
+		if (rc)
+			pr_err("channel:%x failed\n", btm_chan);
 	break;
 	default:
 		return -EINVAL;
 	}
 
-	rc = qpnp_adc_tm_scale_voltage_therm_pu2(chip->vadc_dev,
-					chip->adc->adc_prop, reg, &result);
-	if (rc < 0) {
-		pr_err("Failed to lookup the therm thresholds\n");
-		return rc;
-	}
-
-	*temp = result;
-
-	return 0;
+	return rc;
 }
 
-static int qpnp_adc_tm_set_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int temp)
+static int qpnp_adc_tm_set_trip_temp(void *data, int low_temp, int high_temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm = data;
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 	struct qpnp_adc_tm_config tm_config;
 	u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1;
@@ -1729,19 +1041,18 @@
 
 	tm_config.channel = adc_tm->vadc_channel_num;
 	tm_config.high_thr_temp = tm_config.low_thr_temp = 0;
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		tm_config.high_thr_temp = temp;
-		break;
-	case ADC_TM_TRIP_LOW_COOL:
-		tm_config.low_thr_temp = temp;
-		break;
-	default:
+	if (high_temp != INT_MAX)
+		tm_config.high_thr_temp = high_temp;
+	if (low_temp != INT_MIN)
+		tm_config.low_thr_temp = low_temp;
+
+	if ((high_temp == INT_MAX) && (low_temp == INT_MIN)) {
+		pr_err("No trips to set\n");
 		return -EINVAL;
 	}
 
-	pr_debug("requested a high - %d and low - %d with trip - %d\n",
-			tm_config.high_thr_temp, tm_config.low_thr_temp, trip);
+	pr_debug("requested a high - %d and low - %d\n",
+			tm_config.high_thr_temp, tm_config.low_thr_temp);
 	rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->vadc_dev,
 				chip->adc->adc_prop, &tm_config);
 	if (rc < 0) {
@@ -1764,20 +1075,12 @@
 		return rc;
 	}
 
-	if (!chip->adc_tm_hc) {
-		reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr;
-		reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr;
-		reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr;
-		reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr;
-	} else {
-		reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
-		reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
-		reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
-		reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
-	}
+	reg_low_thr_lsb = QPNP_BTM_Mn_LOW_THR0(btm_chan_idx);
+	reg_low_thr_msb = QPNP_BTM_Mn_LOW_THR1(btm_chan_idx);
+	reg_high_thr_lsb = QPNP_BTM_Mn_HIGH_THR0(btm_chan_idx);
+	reg_high_thr_msb = QPNP_BTM_Mn_HIGH_THR1(btm_chan_idx);
 
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
+	if (high_temp != INT_MAX) {
 		rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb,
 						trip_cool_thr0, 1);
 		if (rc) {
@@ -1791,9 +1094,26 @@
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm->low_thr = tm_config.high_thr_voltage;
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
+		adc_tm->low_thr = tm_config.high_thr_voltage;
+
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_HIGH_WARM,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+		if (rc) {
+			pr_err("adc-tm warm activation failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_HIGH_WARM,
+				THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc) {
+			pr_err("adc-tm warm deactivation failed\n");
+			return rc;
+		}
+	}
+
+	if (low_temp != INT_MIN) {
 		rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb,
 						trip_warm_thr0, 1);
 		if (rc) {
@@ -1807,10 +1127,37 @@
 			pr_err("adc-tm_tm read threshold err\n");
 			return rc;
 		}
-	adc_tm->high_thr = tm_config.low_thr_voltage;
-	break;
-	default:
-		return -EINVAL;
+		adc_tm->high_thr = tm_config.low_thr_voltage;
+
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_LOW_COOL,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+		if (rc) {
+			pr_err("adc-tm cool activation failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_activate_trip_type(adc_tm,
+				ADC_TM_TRIP_LOW_COOL,
+				THERMAL_TRIP_ACTIVATION_DISABLED);
+		if (rc) {
+			pr_err("adc-tm cool deactivation failed\n");
+			return rc;
+		}
+	}
+
+	if ((high_temp != INT_MAX) || (low_temp != INT_MIN)) {
+		rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_ENABLED);
+		if (rc) {
+			pr_err("sensor enable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_DISABLED);
+		if (rc) {
+			pr_err("sensor disable failed\n");
+			return rc;
+		}
 	}
 
 	return 0;
@@ -1878,9 +1225,8 @@
 	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 
 	if (adc_tm->thermal_node) {
-		sysfs_notify(&adc_tm->tz_dev->device.kobj,
-					NULL, "type");
 		pr_debug("notifying uspace client\n");
+		of_thermal_handle_trip(adc_tm->tz_dev);
 	} else {
 		if (adc_tm->scale_type == SCALE_RBATT_THERM)
 			notify_battery_therm(adc_tm);
@@ -1891,222 +1237,23 @@
 	atomic_dec(&chip->wq_cnt);
 }
 
-static int qpnp_adc_tm_activate_trip_type(struct thermal_zone_device *thermal,
-			int trip, enum thermal_trip_activation_mode mode)
-{
-	struct qpnp_adc_tm_sensor *adc_tm = thermal->devdata;
-	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
-	int rc = 0, sensor_mask = 0;
-	u8 thr_int_en = 0;
-	bool state = false;
-	uint32_t btm_chan_idx = 0, btm_chan = 0;
-
-	if (qpnp_adc_tm_is_valid(chip))
-		return -ENODEV;
-
-	if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num))
-		return -EINVAL;
-
-	if (mode == THERMAL_TRIP_ACTIVATION_ENABLED)
-		state = true;
-
-	sensor_mask = 1 << adc_tm->sensor_num;
-
-	pr_debug("Sensor number:%x with state:%d\n",
-					adc_tm->sensor_num, state);
-
-	btm_chan = adc_tm->btm_channel_num;
-	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx);
-	if (rc < 0) {
-		pr_err("Invalid btm channel idx\n");
-		return rc;
-	}
-
-	switch (trip) {
-	case ADC_TM_TRIP_HIGH_WARM:
-		/* low_thr (lower voltage) for higher temp */
-		thr_int_en = adc_tm_data[btm_chan_idx].low_thr_int_chan_en;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_LOW_THR_INT_EN,
-				sensor_mask, state);
-		else
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_LOW_THR_INT_EN, state);
-		if (rc)
-			pr_err("channel:%x failed\n", btm_chan);
-	break;
-	case ADC_TM_TRIP_LOW_COOL:
-		/* high_thr (higher voltage) for cooler temp */
-		thr_int_en = adc_tm_data[btm_chan_idx].high_thr_int_chan_en;
-		if (!chip->adc_tm_hc)
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_ADC_TM_HIGH_THR_INT_EN,
-				sensor_mask, state);
-		else
-			rc = qpnp_adc_tm_reg_update(chip,
-				QPNP_BTM_Mn_EN(btm_chan_idx),
-				QPNP_BTM_Mn_HIGH_THR_INT_EN, state);
-		if (rc)
-			pr_err("channel:%x failed\n", btm_chan);
-	break;
-	default:
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip,
-			int sensor_num, u8 status_high, u8 *notify_check)
-{
-	int rc = 0;
-	u8 sensor_mask = 0, mode_ctl = 0;
-	int32_t old_thr = 0, new_thr = 0;
-	uint32_t channel, btm_chan_num, scale_type;
-	struct qpnp_vadc_result result;
-	struct qpnp_adc_thr_client_info *client_info = NULL;
-	struct list_head *thr_list;
-	bool status = false;
-
-	if (!chip->adc_tm_recalib_check) {
-		*notify_check = 1;
-		return rc;
-	}
-
-	list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) {
-		client_info = list_entry(thr_list,
-				struct qpnp_adc_thr_client_info, list);
-		channel = client_info->btm_param->channel;
-		btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
-		sensor_mask = 1 << sensor_num;
-
-		rc = qpnp_vadc_read(chip->vadc_dev, channel, &result);
-		if (rc < 0) {
-			pr_err("failure to read vadc channel=%d\n",
-					client_info->btm_param->channel);
-			goto fail;
-		}
-		new_thr = result.physical;
-
-		if (status_high)
-			old_thr = client_info->btm_param->high_thr;
-		else
-			old_thr = client_info->btm_param->low_thr;
-
-		if (new_thr > old_thr)
-			status = (status_high) ? true : false;
-		else
-			status = (status_high) ? false : true;
-
-		pr_debug(
-			"recalib:sen=%d, new_thr=%d, new_thr_adc_code=0x%x, old_thr=%d status=%d valid_status=%d\n",
-			sensor_num, new_thr, result.adc_code,
-			old_thr, status_high, status);
-
-		rc = qpnp_adc_tm_read_thr_value(chip, btm_chan_num);
-		if (rc < 0) {
-			pr_err("adc-tm thresholds read failed\n");
-			goto fail;
-		}
-
-		if (status) {
-			*notify_check = 1;
-			pr_debug("Client can be notify\n");
-			return rc;
-		}
-
-		pr_debug("Client can not be notify, restart measurement\n");
-		/* Set measurement in single measurement mode */
-		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm single mode select failed\n");
-			goto fail;
-		}
-
-		/* Disable bank */
-		rc = qpnp_adc_tm_disable(chip);
-		if (rc < 0) {
-			pr_err("adc-tm disable failed\n");
-			goto fail;
-		}
-
-		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc < 0) {
-			pr_err("adc-tm req_sts check failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int write failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int enable failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi measurement en failed\n");
-			goto fail;
-		}
-
-		/* restart measurement */
-		scale_type = chip->sensor[sensor_num].scale_type;
-		chip->adc->amux_prop->amux_channel = channel;
-		chip->adc->amux_prop->decimation =
-			chip->adc->adc_channels[sensor_num].adc_decimation;
-		chip->adc->amux_prop->hw_settle_time =
-			chip->adc->adc_channels[sensor_num].hw_settle_time;
-		chip->adc->amux_prop->fast_avg_setup =
-			chip->adc->adc_channels[sensor_num].fast_avg_setup;
-		chip->adc->amux_prop->mode_sel =
-			ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT;
-		adc_tm_rscale_fn[scale_type].chan(chip->vadc_dev,
-				client_info->btm_param,
-				&chip->adc->amux_prop->chan_prop->low_thr,
-				&chip->adc->amux_prop->chan_prop->high_thr);
-		qpnp_adc_tm_add_to_list(chip, sensor_num,
-				client_info->btm_param,
-				chip->adc->amux_prop->chan_prop);
-		chip->adc->amux_prop->chan_prop->tm_channel_select =
-				chip->sensor[sensor_num].btm_channel_num;
-		chip->adc->amux_prop->chan_prop->state_request =
-				client_info->btm_param->state_request;
-
-		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm configure failed with %d\n", rc);
-			goto fail;
-		}
-		*notify_check = 0;
-		pr_debug("BTM channel reconfigured for measuremnt\n");
-	}
-fail:
-	return rc;
-}
-
 static int qpnp_adc_tm_disable_rearm_high_thresholds(
 			struct qpnp_adc_tm_chip *chip, int sensor_num)
 {
 
 	struct qpnp_adc_thr_client_info *client_info = NULL;
 	struct list_head *thr_list;
-	uint32_t btm_chan_num = 0;
-	u8 sensor_mask = 0, notify_check = 0;
+	uint32_t btm_chan_num = 0, btm_chan_idx = 0;
+	u8 sensor_mask = 0;
 	int rc = 0;
 
 	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
 	pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 		sensor_num, chip->th_info.adc_tm_high_enable,
 		chip->th_info.adc_tm_low_enable,
@@ -2118,11 +1265,11 @@
 		 */
 		sensor_mask = 1 << sensor_num;
 		pr_debug("non thermal node - mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_recalib_request_check(chip,
-				sensor_num, true, &notify_check);
-		if (rc < 0 || !notify_check) {
-			pr_debug("Calib recheck re-armed rc=%d\n", rc);
-			chip->th_info.adc_tm_high_enable = 0;
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+		if (rc < 0) {
+			pr_err("high threshold int update failed\n");
 			return rc;
 		}
 	} else {
@@ -2134,7 +1281,7 @@
 		sensor_mask = 1 << sensor_num;
 		pr_debug("thermal node with mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
+			&chip->sensor[sensor_num],
 			ADC_TM_TRIP_LOW_COOL,
 			THERMAL_TRIP_ACTIVATION_DISABLED);
 		if (rc < 0) {
@@ -2159,22 +1306,12 @@
 	}
 	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_BTM_Mn_EN(sensor_num),
-			QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip,
+		QPNP_BTM_Mn_EN(sensor_num),
+		QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi meas disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2194,11 +1331,17 @@
 {
 	struct qpnp_adc_thr_client_info *client_info = NULL;
 	struct list_head *thr_list;
-	uint32_t btm_chan_num = 0;
-	u8 sensor_mask = 0, notify_check = 0;
+	uint32_t btm_chan_num = 0, btm_chan_idx = 0;
+	u8 sensor_mask = 0;
 	int rc = 0;
 
 	btm_chan_num = chip->sensor[sensor_num].btm_channel_num;
+	rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx);
+	if (rc < 0) {
+		pr_err("Invalid btm channel idx\n");
+		return rc;
+	}
+
 	pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n",
 		sensor_num, chip->th_info.adc_tm_high_enable,
 		chip->th_info.adc_tm_low_enable,
@@ -2208,20 +1351,13 @@
 		 * For non thermal registered clients such as usb_id,
 		 * vbatt, pmic_therm
 		 */
-		pr_debug("non thermal node - mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_recalib_request_check(chip,
-				sensor_num, false, &notify_check);
-		if (rc < 0 || !notify_check) {
-			pr_debug("Calib recheck re-armed rc=%d\n", rc);
-			chip->th_info.adc_tm_low_enable = 0;
-			return rc;
-		}
 		sensor_mask = 1 << sensor_num;
+		pr_debug("non thermal node - mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
+			QPNP_BTM_Mn_EN(btm_chan_idx),
+			QPNP_BTM_Mn_LOW_THR_INT_EN, false);
 		if (rc < 0) {
-			pr_err("low threshold int read failed\n");
+			pr_err("low threshold int update failed\n");
 			return rc;
 		}
 	} else {
@@ -2233,7 +1369,7 @@
 		sensor_mask = 1 << sensor_num;
 		pr_debug("thermal node with mask:%x\n", sensor_mask);
 		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
+			&chip->sensor[sensor_num],
 			ADC_TM_TRIP_HIGH_WARM,
 			THERMAL_TRIP_ACTIVATION_DISABLED);
 		if (rc < 0) {
@@ -2258,22 +1394,12 @@
 	}
 	qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_BTM_Mn_EN(sensor_num),
-			QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi meas disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip,
+		QPNP_BTM_Mn_EN(sensor_num),
+		QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi meas disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2299,14 +1425,6 @@
 
 	mutex_lock(&chip->adc->adc_lock);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc) {
-			pr_err("adc-tm-tm req sts check failed with %d\n", rc);
-			goto fail;
-		}
-	}
-
 	while (sensor_num < chip->max_channels_available) {
 		if (chip->sensor[sensor_num].high_thr_triggered) {
 			rc = qpnp_adc_tm_disable_rearm_high_thresholds(
@@ -2364,93 +1482,6 @@
 		pr_err("adc-tm high thr work failed\n");
 }
 
-static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data)
-{
-	struct qpnp_adc_tm_chip *chip = data;
-	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
-	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
-
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	/* Set measurement in single measurement mode */
-	qpnp_adc_tm_mode_select(chip, mode_ctl);
-
-	qpnp_adc_tm_disable(chip);
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH,
-					&chip->th_info.status_high, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-				&chip->th_info.adc_tm_high_thr_set, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read high thr failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	/*
-	 * Check which interrupt threshold is lower and measure against the
-	 * enabled channel.
-	 */
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-				&chip->th_info.qpnp_adc_tm_meas_en, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	chip->th_info.adc_tm_high_enable = chip->th_info.qpnp_adc_tm_meas_en &
-						chip->th_info.status_high;
-	chip->th_info.adc_tm_high_enable &= chip->th_info.adc_tm_high_thr_set;
-
-	sensor_notify_num = chip->th_info.adc_tm_high_enable;
-	while (i < chip->max_channels_available) {
-		if ((sensor_notify_num & 0x1) == 1)
-			sensor_num = i;
-		sensor_notify_num >>= 1;
-		i++;
-	}
-
-	if (!chip->sensor[sensor_num].thermal_node) {
-		sensor_mask = 1 << sensor_num;
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_HIGH_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int read failed\n");
-			return IRQ_HANDLED;
-		}
-	} else {
-		/*
-		 * Uses the thermal sysfs registered device to disable
-		 * the corresponding high voltage threshold which
-		 * is triggered by low temp
-		 */
-		pr_debug("thermal node with mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
-			ADC_TM_TRIP_LOW_COOL,
-			THERMAL_TRIP_ACTIVATION_DISABLED);
-		if (rc < 0) {
-			pr_err("notify error:%d\n", sensor_num);
-			return IRQ_HANDLED;
-		}
-	}
-
-	atomic_inc(&chip->wq_cnt);
-	queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
-
-	return IRQ_HANDLED;
-}
-
 static void qpnp_adc_tm_low_thr_work(struct work_struct *work)
 {
 	struct qpnp_adc_tm_chip *chip = container_of(work,
@@ -2471,88 +1502,6 @@
 		pr_err("adc-tm low thr work failed\n");
 }
 
-static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data)
-{
-	struct qpnp_adc_tm_chip *chip = data;
-	u8 mode_ctl = 0, status1 = 0, sensor_mask = 0;
-	int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0;
-
-	mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	/* Set measurement in single measurement mode */
-	qpnp_adc_tm_mode_select(chip, mode_ctl);
-
-	qpnp_adc_tm_disable(chip);
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc) {
-		pr_err("adc-tm read status1 failed\n");
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW,
-					&chip->th_info.status_low, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status low failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-				&chip->th_info.adc_tm_low_thr_set, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read low thr failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-				&chip->th_info.qpnp_adc_tm_meas_en, 1);
-	if (rc) {
-		pr_err("adc-tm-tm read status high failed with %d\n", rc);
-		return IRQ_HANDLED;
-	}
-
-	chip->th_info.adc_tm_low_enable = chip->th_info.qpnp_adc_tm_meas_en &
-					chip->th_info.status_low;
-	chip->th_info.adc_tm_low_enable &= chip->th_info.adc_tm_low_thr_set;
-
-	sensor_notify_num = chip->th_info.adc_tm_low_enable;
-	while (i < chip->max_channels_available) {
-		if ((sensor_notify_num & 0x1) == 1)
-			sensor_num = i;
-		sensor_notify_num >>= 1;
-		i++;
-	}
-
-	if (!chip->sensor[sensor_num].thermal_node) {
-		sensor_mask = 1 << sensor_num;
-		rc = qpnp_adc_tm_reg_update(chip,
-			QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int read failed\n");
-			return IRQ_HANDLED;
-		}
-	} else {
-		/* Uses the thermal sysfs registered device to disable
-		 * the corresponding low voltage threshold which
-		 * is triggered by high temp
-		 */
-		pr_debug("thermal node with mask:%x\n", sensor_mask);
-		rc = qpnp_adc_tm_activate_trip_type(
-			chip->sensor[sensor_num].tz_dev,
-			ADC_TM_TRIP_HIGH_WARM,
-			THERMAL_TRIP_ACTIVATION_DISABLED);
-		if (rc < 0) {
-			pr_err("notify error:%d\n", sensor_num);
-			return IRQ_HANDLED;
-		}
-	}
-
-	atomic_inc(&chip->wq_cnt);
-	queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
-
-	return IRQ_HANDLED;
-}
-
 static int qpnp_adc_tm_rc_check_sensor_trip(struct qpnp_adc_tm_chip *chip,
 			u8 status_low, u8 status_high, int i,
 			int *sensor_low_notify_num, int *sensor_high_notify_num)
@@ -2587,7 +1536,7 @@
 			 */
 			pr_debug("thermal node with mask:%x\n", sensor_mask);
 				rc = qpnp_adc_tm_activate_trip_type(
-					chip->sensor[i].tz_dev,
+					&chip->sensor[i],
 					ADC_TM_TRIP_HIGH_WARM,
 					THERMAL_TRIP_ACTIVATION_DISABLED);
 				if (rc < 0) {
@@ -2618,7 +1567,7 @@
 			 */
 				pr_debug("thermal node with mask:%x\n", i);
 				rc = qpnp_adc_tm_activate_trip_type(
-					chip->sensor[i].tz_dev,
+					&chip->sensor[i],
 					ADC_TM_TRIP_LOW_COOL,
 					THERMAL_TRIP_ACTIVATION_DISABLED);
 				if (rc < 0) {
@@ -2688,10 +1637,9 @@
 	return IRQ_HANDLED;
 }
 
-static int qpnp_adc_read_temp(struct thermal_zone_device *thermal,
-			     int *temp)
+static int qpnp_adc_read_temp(void *data, int *temp)
 {
-	struct qpnp_adc_tm_sensor *adc_tm_sensor = thermal->devdata;
+	struct qpnp_adc_tm_sensor *adc_tm_sensor = data;
 	struct qpnp_adc_tm_chip *chip = adc_tm_sensor->chip;
 	struct qpnp_vadc_result result;
 	int rc = 0;
@@ -2706,14 +1654,9 @@
 	return rc;
 }
 
-static struct thermal_zone_device_ops qpnp_adc_tm_thermal_ops = {
+static struct thermal_zone_of_device_ops qpnp_adc_tm_thermal_ops = {
 	.get_temp = qpnp_adc_read_temp,
-	.get_mode = qpnp_adc_tm_get_mode,
-	.set_mode = qpnp_adc_tm_set_mode,
-	.get_trip_type = qpnp_adc_tm_get_trip_type,
-	.activate_trip_type = qpnp_adc_tm_activate_trip_type,
-	.get_trip_temp = qpnp_adc_tm_get_trip_temp,
-	.set_trip_temp = qpnp_adc_tm_set_trip_temp,
+	.set_trips = qpnp_adc_tm_set_trip_temp,
 };
 
 int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
@@ -2807,18 +1750,11 @@
 					param->state_request;
 	chip->adc->amux_prop->calib_type =
 			chip->adc->adc_channels[dt_index].calib_type;
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm configure failed with %d\n", rc);
-			goto fail_unlock;
-		}
-	} else {
-		rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
-		if (rc) {
-			pr_err("adc-tm hc configure failed with %d\n", rc);
-			goto fail_unlock;
-		}
+
+	rc = qpnp_adc_tm_hc_configure(chip, chip->adc->amux_prop);
+	if (rc) {
+		pr_err("adc-tm hc configure failed with %d\n", rc);
+		goto fail_unlock;
 	}
 
 	chip->sensor[dt_index].scale_type = scale_type;
@@ -2834,7 +1770,6 @@
 					struct qpnp_adc_tm_btm_param *param)
 {
 	uint32_t channel, dt_index = 0, btm_chan_num;
-	u8 sensor_mask = 0, mode_ctl = 0;
 	int rc = 0;
 
 	if (qpnp_adc_tm_is_valid(chip))
@@ -2842,16 +1777,6 @@
 
 	mutex_lock(&chip->adc->adc_lock);
 
-	if (!chip->adc_tm_hc) {
-		/* Set measurement in single measurement mode */
-		mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-		rc = qpnp_adc_tm_mode_select(chip, mode_ctl);
-		if (rc < 0) {
-			pr_err("adc-tm single mode select failed\n");
-			goto fail;
-		}
-	}
-
 	/* Disable bank */
 	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0) {
@@ -2859,15 +1784,6 @@
 		goto fail;
 	}
 
-	if (!chip->adc_tm_hc) {
-		/* Check if a conversion is in progress */
-		rc = qpnp_adc_tm_req_sts_check(chip);
-		if (rc < 0) {
-			pr_err("adc-tm req_sts check failed\n");
-			goto fail;
-		}
-	}
-
 	channel = param->channel;
 	while ((chip->adc->adc_channels[dt_index].channel_num
 		!= channel) && (dt_index < chip->max_channels_available))
@@ -2881,50 +1797,25 @@
 
 	btm_chan_num = chip->sensor[dt_index].btm_channel_num;
 
-	if (!chip->adc_tm_hc) {
-		sensor_mask = 1 << chip->sensor[dt_index].sensor_num;
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
+	if (rc < 0) {
+		pr_err("high thr disable err:%d\n", btm_chan_num);
+		return rc;
+	}
 
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("low threshold int write failed\n");
-			goto fail;
-		}
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_LOW_THR_INT_EN, false);
+	if (rc < 0) {
+		pr_err("low thr disable err:%d\n", btm_chan_num);
+		return rc;
+	}
 
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("high threshold int enable failed\n");
-			goto fail;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-			sensor_mask, false);
-		if (rc < 0) {
-			pr_err("multi measurement en failed\n");
-			goto fail;
-		}
-	} else {
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_HIGH_THR_INT_EN, false);
-		if (rc < 0) {
-			pr_err("high thr disable err:%d\n", btm_chan_num);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_LOW_THR_INT_EN, false);
-		if (rc < 0) {
-			pr_err("low thr disable err:%d\n", btm_chan_num);
-			return rc;
-		}
-
-		rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
-					QPNP_BTM_Mn_MEAS_EN, false);
-		if (rc < 0) {
-			pr_err("multi measurement disable failed\n");
-			return rc;
-		}
+	rc = qpnp_adc_tm_reg_update(chip, QPNP_BTM_Mn_EN(btm_chan_num),
+				QPNP_BTM_Mn_MEAS_EN, false);
+	if (rc < 0) {
+		pr_err("multi measurement disable failed\n");
+		return rc;
 	}
 
 	rc = qpnp_adc_tm_enable_if_channel_meas(chip);
@@ -2938,22 +1829,6 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas);
 
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
-				struct qpnp_adc_tm_btm_param *param)
-{
-	param->channel = LR_MUX10_PU2_AMUX_USB_ID_LV;
-	return qpnp_adc_tm_channel_measure(chip, param);
-}
-EXPORT_SYMBOL(qpnp_adc_tm_usbid_configure);
-
-int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
-{
-	struct qpnp_adc_tm_btm_param param;
-
-	return qpnp_adc_tm_disable_chan_meas(chip, &param);
-}
-EXPORT_SYMBOL(qpnp_adc_tm_usbid_end);
-
 struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name)
 {
 	struct qpnp_adc_tm_chip *chip;
@@ -2974,35 +1849,6 @@
 }
 EXPORT_SYMBOL(qpnp_get_adc_tm);
 
-static int qpnp_adc_tm_initial_setup(struct qpnp_adc_tm_chip *chip)
-{
-	u8 thr_init = 0;
-	int rc = 0;
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("high thr init failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("low thr init failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN,
-							thr_init, 1);
-	if (rc < 0) {
-		pr_err("multi meas en failed\n");
-		return rc;
-	}
-
-	return rc;
-}
-
 static const struct of_device_id qpnp_adc_tm_match_table[] = {
 	{	.compatible = "qcom,qpnp-adc-tm" },
 	{	.compatible = "qcom,qpnp-adc-tm-hc" },
@@ -3055,10 +1901,8 @@
 		goto fail;
 	}
 
-	if (of_device_is_compatible(node, "qcom,qpnp-adc-tm-hc")) {
-		chip->adc_tm_hc = true;
-		chip->adc->adc_hc = true;
-	}
+	chip->adc_tm_hc = true;
+	chip->adc->adc_hc = true;
 
 	rc = qpnp_adc_get_devicetree_data(pdev, chip->adc);
 	if (rc) {
@@ -3067,25 +1911,6 @@
 	}
 	mutex_init(&chip->adc->adc_lock);
 
-	/* Register the ADC peripheral interrupt */
-	if (!chip->adc_tm_hc) {
-		chip->adc->adc_high_thr_irq = platform_get_irq_byname(pdev,
-						"high-thr-en-set");
-		if (chip->adc->adc_high_thr_irq < 0) {
-			pr_err("Invalid irq\n");
-			rc = -ENXIO;
-			goto fail;
-		}
-
-		chip->adc->adc_low_thr_irq = platform_get_irq_byname(pdev,
-						"low-thr-en-set");
-		if (chip->adc->adc_low_thr_irq < 0) {
-			pr_err("Invalid irq\n");
-			rc = -ENXIO;
-			goto fail;
-		}
-	}
-
 	chip->vadc_dev = qpnp_get_vadc(&pdev->dev, "adc_tm");
 	if (IS_ERR(chip->vadc_dev)) {
 		rc = PTR_ERR(chip->vadc_dev);
@@ -3155,10 +1980,11 @@
 			chip->sensor[sen_idx].high_thr =
 						QPNP_ADC_TM_M0_HIGH_THR;
 			chip->sensor[sen_idx].tz_dev =
-				thermal_zone_device_register(name,
-				ADC_TM_TRIP_NUM, ADC_TM_WRITABLE_TRIPS_MASK,
+				devm_thermal_zone_of_sensor_register(
+				chip->dev,
+				chip->sensor[sen_idx].vadc_channel_num,
 				&chip->sensor[sen_idx],
-				&qpnp_adc_tm_thermal_ops, NULL, 0, 0);
+				&qpnp_adc_tm_thermal_ops);
 			if (IS_ERR(chip->sensor[sen_idx].tz_dev))
 				pr_err("thermal device register failed.\n");
 		}
@@ -3173,18 +1999,21 @@
 		sen_idx++;
 	}
 	chip->max_channels_available = count_adc_channel_list;
+
 	chip->high_thr_wq = alloc_workqueue("qpnp_adc_tm_high_thr_wq",
 							WQ_HIGHPRI, 0);
 	if (!chip->high_thr_wq) {
 		pr_err("Requesting high thr priority wq failed\n");
 		goto fail;
 	}
+
 	chip->low_thr_wq = alloc_workqueue("qpnp_adc_tm_low_thr_wq",
 							WQ_HIGHPRI, 0);
 	if (!chip->low_thr_wq) {
 		pr_err("Requesting low thr priority wq failed\n");
 		goto fail;
 	}
+
 	chip->thr_wq = alloc_workqueue("qpnp_adc_tm_thr_wq",
 						WQ_HIGHPRI, 0);
 	if (!chip->thr_wq) {
@@ -3196,39 +2025,13 @@
 	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
 	atomic_set(&chip->wq_cnt, 0);
 
-	if (!chip->adc_tm_hc) {
-		rc = qpnp_adc_tm_initial_setup(chip);
-		if (rc)
-			goto fail;
-
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_high_thr_irq,
-				qpnp_adc_tm_high_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip);
-		if (rc) {
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-			goto fail;
-		} else {
-			enable_irq_wake(chip->adc->adc_high_thr_irq);
-		}
-
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_low_thr_irq,
-					qpnp_adc_tm_low_thr_isr,
-			IRQF_TRIGGER_RISING, "qpnp_adc_tm_low_interrupt", chip);
-		if (rc) {
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-			goto fail;
-		} else {
-			enable_irq_wake(chip->adc->adc_low_thr_irq);
-		}
-	} else {
-		rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
-				qpnp_adc_tm_rc_thr_isr,
-			IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
-		if (rc)
-			dev_err(&pdev->dev, "failed to request adc irq\n");
-		else
-			enable_irq_wake(chip->adc->adc_irq_eoc);
-	}
+	rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
+			qpnp_adc_tm_rc_thr_isr,
+		IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+	if (rc)
+		dev_err(&pdev->dev, "failed to request adc irq\n");
+	else
+		enable_irq_wake(chip->adc->adc_irq_eoc);
 
 	chip->adc_vote_enable = false;
 	dev_set_drvdata(&pdev->dev, chip);
@@ -3258,17 +2061,11 @@
 {
 	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
 	struct device_node *node = pdev->dev.of_node, *child;
-	bool thermal_node = false;
 	int i = 0;
 
 	for_each_child_of_node(node, child) {
-		thermal_node = of_property_read_bool(child,
-					"qcom,thermal-node");
-		if (thermal_node) {
-			thermal_zone_device_unregister(chip->sensor[i].tz_dev);
-			if (chip->sensor[i].req_wq)
-				destroy_workqueue(chip->sensor[i].req_wq);
-		}
+		if (chip->sensor[i].req_wq)
+			destroy_workqueue(chip->sensor[i].req_wq);
 		i++;
 	}
 
@@ -3286,40 +2083,20 @@
 static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
 {
 	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
-	int rc = 0;
-	u8 reg_val = 0, status1 = 0, en_ctl1 = 0;
-
-	/* Set measurement in single measurement mode */
-	reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
-	rc = qpnp_adc_tm_mode_select(chip, reg_val);
-	if (rc < 0)
-		pr_err("adc-tm single mode select failed\n");
+	int rc = 0, i = 0;
 
 	/* Disable bank */
 	rc = qpnp_adc_tm_disable(chip);
 	if (rc < 0)
 		pr_err("adc-tm disable failed\n");
 
-	/* Check if a conversion is in progress */
-	rc = qpnp_adc_tm_req_sts_check(chip);
-	if (rc < 0)
-		pr_err("adc-tm req_sts check failed\n");
-
-	/* Disable multimeasurement */
-	reg_val = 0;
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
-	if (rc < 0)
-		pr_err("adc-tm multi-measurement mode disable failed\n");
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
-	if (rc < 0)
-		pr_err("adc-tm status1 read failed\n");
-
-	rc = qpnp_adc_tm_read_reg(chip, QPNP_EN_CTL1, &en_ctl1, 1);
-	if (rc < 0)
-		pr_err("adc-tm en_ctl1 read failed\n");
-
-	pr_debug("adc-tm status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+	for (i = 0; i < QPNP_BTM_CHANNELS; i++) {
+		rc = qpnp_adc_tm_reg_update(chip,
+			QPNP_BTM_Mn_EN(i),
+			QPNP_BTM_Mn_MEAS_EN, false);
+		if (rc < 0)
+			pr_err("multi measurement disable failed\n");
+	}
 }
 
 static int qpnp_adc_tm_suspend_noirq(struct device *dev)
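The hunks above move qpnp-adc-tm from thermal_zone_device_register() to the of-thermal sensor interface, so trip points and policy now come from the thermal zones described in devicetree while the driver only supplies a temperature read-back. A minimal sketch of that registration pattern, assuming the stock devm_thermal_zone_of_sensor_register() API (driver names below are illustrative, not from this patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

struct my_sensor {
	struct device *dev;
	struct thermal_zone_device *tzd;
};

/* of-thermal get_temp() receives the driver's private pointer, not a tz */
static int my_sensor_get_temp(void *data, int *temp)
{
	struct my_sensor *s = data;

	dev_dbg(s->dev, "temp read\n");
	*temp = 25000;		/* placeholder; read the hardware here (mC) */
	return 0;
}

static const struct thermal_zone_of_device_ops my_sensor_ops = {
	.get_temp = my_sensor_get_temp,
};

static int my_sensor_register(struct my_sensor *s, int sensor_id)
{
	/* sensor_id must match a thermal-sensor cell in the DT zone */
	s->tzd = devm_thermal_zone_of_sensor_register(s->dev, sensor_id,
						       s, &my_sensor_ops);
	return PTR_ERR_OR_ZERO(s->tzd);
}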
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index e86a297..09c95e5 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -28,6 +28,8 @@
 #include <linux/thermal.h>
 #include <linux/qpnp/qpnp-adc.h>
 
+#include "thermal_core.h"
+
 #define QPNP_TM_DRIVER_NAME "qcom,qpnp-temp-alarm"
 
 enum qpnp_tm_registers {
@@ -97,7 +99,6 @@
 	unsigned int			subtype;
 	enum qpnp_tm_adc_type		adc_type;
 	int				temperature;
-	enum thermal_device_mode	mode;
 	unsigned int			thresh;
 	unsigned int			clock_rate;
 	unsigned int			stage;
@@ -105,18 +106,12 @@
 	int				irq;
 	enum qpnp_vadc_channels		adc_channel;
 	u16				base_addr;
-	bool				allow_software_override;
 	struct qpnp_vadc_chip		*vadc_dev;
 };
 
 /* Delay between TEMP_STAT IRQ going high and status value changing in ms. */
 #define STATUS_REGISTER_DELAY_MS       40
 
-enum pmic_thermal_override_mode {
-	SOFTWARE_OVERRIDE_DISABLED = 0,
-	SOFTWARE_OVERRIDE_ENABLED,
-};
-
 /* This array maps from GEN2 alarm state to GEN1 alarm stage */
 const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
 
@@ -156,28 +151,6 @@
 	return rc;
 }
 
-
-static inline int qpnp_tm_shutdown_override(struct qpnp_tm_chip *chip,
-			    enum pmic_thermal_override_mode mode)
-{
-	int rc = 0;
-	u8 reg;
-
-	if (chip->allow_software_override) {
-		reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
-		reg |= (chip->clock_rate << SHUTDOWN_CTRL1_CLK_RATE_SHIFT)
-			& SHUTDOWN_CTRL1_CLK_RATE_MASK;
-
-		if (mode == SOFTWARE_OVERRIDE_ENABLED)
-			reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2
-				| SHUTDOWN_CTRL1_OVERRIDE_STAGE3;
-
-		rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
-	}
-
-	return rc;
-}
-
 static int qpnp_tm_update_temp(struct qpnp_tm_chip *chip)
 {
 	struct qpnp_vadc_result adc_result;
@@ -274,10 +247,9 @@
 	return 0;
 }
 
-static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
-				     int *temperature)
+static int qpnp_tz_get_temp_no_adc(void *data, int *temperature)
 {
-	struct qpnp_tm_chip *chip = thermal->devdata;
+	struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
 	int rc;
 
 	if (!temperature)
@@ -292,10 +264,9 @@
 	return 0;
 }
 
-static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
-				      int *temperature)
+static int qpnp_tz_get_temp_qpnp_adc(void *data, int *temperature)
 {
-	struct qpnp_tm_chip *chip = thermal->devdata;
+	struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
 	int rc;
 
 	if (!temperature)
@@ -314,121 +285,12 @@
 	return 0;
 }
 
-static int qpnp_tz_get_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode *mode)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-
-	if (!mode)
-		return -EINVAL;
-
-	*mode = chip->mode;
-
-	return 0;
-}
-
-static int qpnp_tz_set_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode mode)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-	int rc = 0;
-
-	if (mode != chip->mode) {
-		if (mode == THERMAL_DEVICE_ENABLED)
-			rc = qpnp_tm_shutdown_override(chip,
-				SOFTWARE_OVERRIDE_ENABLED);
-		else
-			rc = qpnp_tm_shutdown_override(chip,
-				SOFTWARE_OVERRIDE_DISABLED);
-
-		chip->mode = mode;
-	}
-
-	return rc;
-}
-
-static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
-				   int trip, enum thermal_trip_type *type)
-{
-	if (trip < 0 || !type)
-		return -EINVAL;
-
-	switch (trip) {
-	case TRIP_STAGE3:
-		*type = THERMAL_TRIP_CRITICAL;
-		break;
-	case TRIP_STAGE2:
-		*type = THERMAL_TRIP_HOT;
-		break;
-	case TRIP_STAGE1:
-		*type = THERMAL_TRIP_HOT;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int *temperature)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-	int thresh_temperature;
-
-	if (trip < 0 || !temperature)
-		return -EINVAL;
-
-	thresh_temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN;
-
-	switch (trip) {
-	case TRIP_STAGE3:
-		thresh_temperature += 2 * TEMP_STAGE_STEP;
-		break;
-	case TRIP_STAGE2:
-		thresh_temperature += TEMP_STAGE_STEP;
-		break;
-	case TRIP_STAGE1:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	*temperature = thresh_temperature;
-
-	return 0;
-}
-
-static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
-				   int *temperature)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-
-	if (!temperature)
-		return -EINVAL;
-
-	*temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
-		2 * TEMP_STAGE_STEP;
-
-	return 0;
-}
-
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_no_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_no_adc = {
 	.get_temp = qpnp_tz_get_temp_no_adc,
-	.get_mode = qpnp_tz_get_mode,
-	.set_mode = qpnp_tz_set_mode,
-	.get_trip_type = qpnp_tz_get_trip_type,
-	.get_trip_temp = qpnp_tz_get_trip_temp,
-	.get_crit_temp = qpnp_tz_get_crit_temp,
 };
 
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
 	.get_temp = qpnp_tz_get_temp_qpnp_adc,
-	.get_mode = qpnp_tz_get_mode,
-	.set_mode = qpnp_tz_set_mode,
-	.get_trip_type = qpnp_tz_get_trip_type,
-	.get_trip_temp = qpnp_tz_get_trip_temp,
-	.get_crit_temp = qpnp_tz_get_crit_temp,
 };
 
 static void qpnp_tm_work(struct work_struct *work)
@@ -474,11 +336,7 @@
 				chip->tm_name, stage_new, chip->stage,
 				chip->thresh, chip->temperature);
 
-		thermal_zone_device_update(chip->tz_dev,
-						THERMAL_EVENT_UNSPECIFIED);
-
-		/* Notify user space */
-		sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
+		of_thermal_handle_trip(chip->tz_dev);
 	}
 
 bail:
@@ -539,7 +397,7 @@
 	struct device_node *node;
 	unsigned int base;
 	struct qpnp_tm_chip *chip;
-	struct thermal_zone_device_ops *tz_ops;
+	struct thermal_zone_of_device_ops *tz_ops;
 	char *tm_name;
 	u32 default_temperature;
 	int rc = 0;
@@ -640,9 +498,6 @@
 	else
 		tz_ops = &qpnp_thermal_zone_ops_no_adc;
 
-	chip->allow_software_override
-		= of_property_read_bool(node, "qcom,allow-override");
-
 	default_temperature = DEFAULT_NO_ADC_TEMP;
 	rc = of_property_read_u32(node, "qcom,default-temp",
 					&default_temperature);
@@ -686,18 +541,8 @@
 		}
 	}
 
-	/* Start in HW control; switch to SW control when user changes mode. */
-	chip->mode = THERMAL_DEVICE_DISABLED;
-	rc = qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
-	if (rc) {
-		dev_err(&pdev->dev,
-			"%s: qpnp_tm_shutdown_override() failed, rc=%d\n",
-			__func__, rc);
-		goto err_cancel_work;
-	}
-
-	chip->tz_dev = thermal_zone_device_register(tm_name, TRIP_NUM, 0, chip,
-			tz_ops, NULL, 0, 0);
+	chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+							tz_ops);
 	if (chip->tz_dev == NULL) {
 		dev_err(&pdev->dev,
 			"%s: thermal_zone_device_register() failed.\n",
@@ -717,7 +562,7 @@
 	return 0;
 
 err_free_tz:
-	thermal_zone_device_unregister(chip->tz_dev);
+	thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
 err_cancel_work:
 	cancel_delayed_work_sync(&chip->irq_work);
 	kfree(chip->tm_name);
@@ -731,10 +576,9 @@
 {
 	struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
 
+	thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
 	dev_set_drvdata(&pdev->dev, NULL);
-	thermal_zone_device_unregister(chip->tz_dev);
 	kfree(chip->tm_name);
-	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
 	free_irq(chip->irq, chip);
 	cancel_delayed_work_sync(&chip->irq_work);
 	kfree(chip);
@@ -742,38 +586,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int qpnp_tm_suspend(struct device *dev)
-{
-	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
-	/* Clear override bits in suspend to allow hardware control */
-	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
-
-	return 0;
-}
-
-static int qpnp_tm_resume(struct device *dev)
-{
-	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
-	/* Override hardware actions so software can control */
-	if (chip->mode == THERMAL_DEVICE_ENABLED)
-		qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
-
-	return 0;
-}
-
-static const struct dev_pm_ops qpnp_tm_pm_ops = {
-	.suspend = qpnp_tm_suspend,
-	.resume = qpnp_tm_resume,
-};
-
-#define QPNP_TM_PM_OPS	(&qpnp_tm_pm_ops)
-#else
-#define QPNP_TM_PM_OPS	NULL
-#endif
-
 static const struct of_device_id qpnp_tm_match_table[] = {
 	{ .compatible = QPNP_TM_DRIVER_NAME, },
 	{}
@@ -789,7 +601,6 @@
 		.name		= QPNP_TM_DRIVER_NAME,
 		.of_match_table	= qpnp_tm_match_table,
 		.owner		= THIS_MODULE,
-		.pm		= QPNP_TM_PM_OPS,
 	},
 	.probe	  = qpnp_tm_probe,
 	.remove	  = qpnp_tm_remove,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6e3e636..22d32d2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5621,17 +5621,15 @@
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
-	const struct pciserial_board *board;
+	struct serial_private *new;
 
 	if (!priv)
 		return;
 
-	board = priv->board;
-	kfree(priv);
-	priv = pciserial_init_ports(dev, board);
-
-	if (!IS_ERR(priv)) {
-		pci_set_drvdata(dev, priv);
+	new = pciserial_init_ports(dev, priv->board);
+	if (!IS_ERR(new)) {
+		pci_set_drvdata(dev, new);
+		kfree(priv);
 	}
 }
 
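The 8250_pci change above reorders PCI error recovery so the old serial_private is freed only after its replacement has been created successfully; on failure the device keeps its previous, quiesced state instead of carrying a drvdata pointer to freed memory. The allocate-then-swap pattern in isolation, as a sketch written as if inside 8250_pci.c:

static void my_io_resume(struct pci_dev *dev)
{
	struct serial_private *old = pci_get_drvdata(dev);
	struct serial_private *new;

	if (!old)
		return;

	new = pciserial_init_ports(dev, old->board);	/* may fail */
	if (IS_ERR(new))
		return;			/* keep the old state; free nothing */

	pci_set_drvdata(dev, new);	/* publish the new state first */
	kfree(old);			/* only now is the old state dead */
}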
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index df9be34..6a3f2ac 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/console.h>
 #include <linux/io.h>
+#include <linux/ipc_logging.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -41,7 +42,7 @@
 #define SE_UART_RX_STALE_CNT		(0x294)
 #define SE_UART_TX_PARITY_CFG		(0x2A4)
 #define SE_UART_RX_PARITY_CFG		(0x2A8)
-#define SE_UART_MANUAL_RFT		(0x2AC)
+#define SE_UART_MANUAL_RFR		(0x2AC)
 
 /* SE_UART_LOOPBACK_CFG */
 #define NO_LOOPBACK		(0)
@@ -84,6 +85,11 @@
 #define PAR_SPACE		(0x10)
 #define PAR_MARK		(0x11)
 
+/* SE_UART_MANUAL_RFR register fields */
+#define UART_MANUAL_RFR_EN	(BIT(31))
+#define UART_RFR_NOT_READY	(BIT(1))
+#define UART_RFR_READY		(BIT(0))
+
 /* UART M_CMD OP codes */
 #define UART_START_TX		(0x1)
 #define UART_START_BREAK	(0x4)
@@ -94,13 +100,26 @@
 
 #define UART_OVERSAMPLING	(32)
 #define STALE_TIMEOUT		(16)
+#define DEFAULT_BITS_PER_CHAR	(10)
 #define GENI_UART_NR_PORTS	(15)
+#define GENI_UART_CONS_PORTS	(1)
 #define DEF_FIFO_DEPTH_WORDS	(16)
+#define DEF_TX_WM		(2)
 #define DEF_FIFO_WIDTH_BITS	(32)
 #define UART_CORE2X_VOTE	(10000)
 #define DEFAULT_SE_CLK		(19200000)
 #define DEFAULT_BUS_WIDTH	(4)
 
+#define WAKEBYTE_TIMEOUT_MSEC	(2000)
+#define IPC_LOG_PWR_PAGES	(2)
+#define IPC_LOG_MISC_PAGES	(2)
+#define IPC_LOG_TX_RX_PAGES	(3)
+#define DATA_BYTES_PER_LINE	(32)
+
+#define IPC_LOG_MSG(ctx, x...) do { \
+	if (ctx) \
+		ipc_log_string(ctx, x); \
+} while (0)
 
 struct msm_geni_serial_port {
 	struct uart_port uport;
@@ -121,6 +140,14 @@
 			unsigned int rx_last);
 	struct se_geni_rsc serial_rsc;
 	int loopback;
+	int wakeup_irq;
+	unsigned char wakeup_byte;
+	struct wakeup_source geni_wake;
+	void *ipc_log_tx;
+	void *ipc_log_rx;
+	void *ipc_log_pwr;
+	void *ipc_log_misc;
+	unsigned int cur_baud;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -134,12 +161,16 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last);
+static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
+static int msm_geni_serial_power_on(struct uart_port *uport);
+static void msm_geni_serial_power_off(struct uart_port *uport);
 
 static atomic_t uart_line_id = ATOMIC_INIT(0);
 
 #define GET_DEV_PORT(uport) \
 	container_of(uport, struct msm_geni_serial_port, uport)
 
+static struct msm_geni_serial_port msm_geni_console_port;
 static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
 
 static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
@@ -174,22 +205,172 @@
 static DEVICE_ATTR(loopback, 0644, msm_geni_serial_loopback_show,
 					msm_geni_serial_loopback_store);
 
-static void msm_geni_serial_set_mctrl(struct uart_port *port,
+static void dump_ipc(void *ipc_ctx, char *prefix, char *string,
+						u64 addr, int size)
+{
+	char buf[DATA_BYTES_PER_LINE * 2];
+	int len = 0;
+
+	if (!ipc_ctx)
+		return;
+	len = min(size, DATA_BYTES_PER_LINE);
+	hex_dump_to_buffer(string, len, DATA_BYTES_PER_LINE, 1, buf,
+						sizeof(buf), false);
+	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+					(unsigned int)addr, size, buf);
+}
+
+static void check_tx_active(struct uart_port *uport)
+{
+	u32 geni_status = geni_read_reg_nolog(uport->membase,
+					SE_GENI_STATUS);
+
+	while ((geni_status & M_GENI_CMD_ACTIVE)) {
+		cpu_relax();
+		geni_status = geni_read_reg_nolog(uport->membase,
+					SE_GENI_STATUS);
+	}
+}
+
+static int vote_clock_on(struct uart_port *uport)
+{
+	int ret = 0;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+	if (!pm_runtime_enabled(uport->dev)) {
+		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
+		ret = -EPERM;
+		return ret;
+	}
+	ret = msm_geni_serial_power_on(uport);
+	if (ret) {
+		dev_err(uport->dev, "Failed to vote clock on\n");
+		return ret;
+	}
+	__pm_relax(&port->geni_wake);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+	return 0;
+}
+
+static int vote_clock_off(struct uart_port *uport)
+{
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	int ret = 0;
+
+	if (!pm_runtime_enabled(uport->dev)) {
+		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
+		ret = -EPERM;
+		return ret;
+	}
+	/* Block till any ongoing Tx goes out. */
+	check_tx_active(uport);
+	msm_geni_serial_power_off(uport);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+	return 0;
+}
+
+static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	case TIOCPMGET: {
+		ret = vote_clock_on(uport);
+		break;
+	}
+	case TIOCPMPUT: {
+		ret = vote_clock_off(uport);
+		break;
+	}
+	case TIOCPMACT: {
+		ret = !pm_runtime_status_suspended(uport->dev);
+		break;
+	}
+	default:
+		break;
+	}
+	return ret;
+}
+
+static void msm_geni_serial_break_ctl(struct uart_port *uport, int ctl)
+{
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+		dev_err(uport->dev, "%s Device suspended,vote clocks on.\n",
+							__func__);
+		return;
+	}
+
+	if (ctl) {
+		check_tx_active(uport);
+		geni_setup_m_cmd(uport->membase, UART_START_BREAK, 0);
+	} else {
+		geni_setup_m_cmd(uport->membase, UART_STOP_BREAK, 0);
+	}
+	/* Ensure break start/stop command is set up before returning. */
+	mb();
+}
+
+static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
+{
+	u32 geni_ios = 0;
+	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
+
+	if (pm_runtime_status_suspended(uport->dev))
+		return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+
+	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
+	if (!(geni_ios & IO2_DATA_IN))
+		mctrl |= TIOCM_CTS;
+
+	return mctrl;
+}
+
+static void msm_geni_cons_set_mctrl(struct uart_port *uport,
 							unsigned int mctrl)
 {
 }
 
+static void msm_geni_serial_set_mctrl(struct uart_port *uport,
+							unsigned int mctrl)
+{
+	u32 uart_manual_rfr = 0;
+
+	if (pm_runtime_status_suspended(uport->dev)) {
+		dev_info(uport->dev, "%sDevice suspended,vote clocks on\n",
+						__func__);
+		return;
+	}
+	if (!(mctrl & TIOCM_RTS))
+		uart_manual_rfr |= (UART_MANUAL_RFR_EN | UART_RFR_NOT_READY);
+	geni_write_reg_nolog(uart_manual_rfr, uport->membase,
+							SE_UART_MANUAL_RFR);
+	/* Write to flow control must complete before returning to the client. */
+	mb();
+}
+
 static const char *msm_geni_serial_get_type(struct uart_port *uport)
 {
 	return "MSM";
 }
 
-static struct msm_geni_serial_port *get_port_from_line(int line)
+static struct msm_geni_serial_port *get_port_from_line(int line,
+						bool is_console)
 {
-	if ((line < 0) || (line >= GENI_UART_NR_PORTS))
-		return ERR_PTR(-ENXIO);
+	struct msm_geni_serial_port *port = NULL;
 
-	return &msm_geni_serial_ports[line];
+	if (is_console) {
+		if ((line < 0) || (line >= GENI_UART_CONS_PORTS))
+			return ERR_PTR(-ENXIO);
+		port = &msm_geni_console_port;
+	} else {
+		if ((line < 0) || (line >= GENI_UART_NR_PORTS))
+			return ERR_PTR(-ENXIO);
+		port = &msm_geni_serial_ports[line];
+	}
+
+	return port;
 }
 
 static int msm_geni_serial_power_on(struct uart_port *uport)
@@ -198,16 +379,16 @@
 
 	ret = pm_runtime_get_sync(uport->dev);
 	if (ret < 0) {
-		dev_err(uport->dev, "%s: Failed (%d)", __func__, ret);
 		pm_runtime_put_noidle(uport->dev);
+		pm_runtime_set_suspended(uport->dev);
+		return ret;
 	}
-	return ret;
+	return 0;
 }
 
 static void msm_geni_serial_power_off(struct uart_port *uport)
 {
-	pm_runtime_mark_last_busy(uport->dev);
-	pm_runtime_put_autosuspend(uport->dev);
+	pm_runtime_put_sync(uport->dev);
 }
 
 static int msm_geni_serial_poll_bit(struct uart_port *uport,
@@ -216,9 +397,26 @@
 	int iter = 0;
 	unsigned int reg;
 	bool met = false;
+	struct msm_geni_serial_port *port = NULL;
 	bool cond = false;
+	unsigned int baud = 115200;
+	unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS;
+	unsigned long total_iter = 0;
 
-	while (iter < 1000) {
+
+	if (uport->private_data) {
+		port = GET_DEV_PORT(uport);
+		baud = (port->cur_baud ? port->cur_baud : 115200);
+		fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+	}
+	/*
+	 * Total polling iterations based on FIFO worth of bytes to be
+	 * sent at the current baud rate. Add a little fluff to the wait.
+	 */
+	total_iter = ((fifo_bits * USEC_PER_SEC) / baud);
+	total_iter += 50;
+
+	while (iter < total_iter) {
 		reg = geni_read_reg_nolog(uport->membase, offset);
 		cond = reg & bit_field;
 		if (cond == set) {
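The polling budget above is derived from how long a full FIFO would take to drain at the current baud rate, rather than the previous fixed 1000 iterations. A worked example with the default FIFO geometry (values assumed in the comments):

/* 16 words x 32 bits = 512 FIFO bits; at 115200 baud that is roughly
 * 512 * 1000000 / 115200 ~= 4444 us of line time, plus 50 of slack. */
static unsigned long example_poll_budget(void)
{
	unsigned int fifo_bits = 16 * 32;	/* depth words * width bits */
	unsigned int baud = 115200;

	return ((unsigned long)fifo_bits * 1000000UL) / baud + 50;	/* 4494 */
}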
@@ -232,7 +430,7 @@
 }
 
 static void msm_geni_serial_setup_tx(struct uart_port *uport,
-					unsigned int xmit_size)
+				unsigned int xmit_size)
 {
 	u32 m_cmd = 0;
 
@@ -290,23 +488,21 @@
 	unsigned int s_irq_status;
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
-			M_SEC_IRQ_EN, true))) {
-		dev_err(uport->dev, "%s: Failed waiting for SE\n", __func__);
+			M_SEC_IRQ_EN, true)))
 		return -ENXIO;
-	}
 
 	m_irq_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_STATUS);
 	s_irq_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_STATUS);
-	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	geni_write_reg_nolog(m_irq_status, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase,
+						SE_GENI_S_IRQ_CLEAR);
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_RX_FIFO_STATUS,
-			RX_FIFO_WC_MSK, true))) {
-		dev_err(uport->dev, "%s: Failed waiting for Rx\n", __func__);
+			RX_FIFO_WC_MSK, true)))
 		return -ENXIO;
-	}
 
 	/*
 	 * Read the Rx FIFO only after clearing the interrupt registers and
@@ -324,7 +520,6 @@
 	int b = (int) c;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	se_config_packing(uport->membase, 8, 1, false);
 	geni_write_reg_nolog(port->tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, 1);
@@ -358,10 +553,11 @@
 __msm_geni_serial_console_write(struct uart_port *uport, const char *s,
 				unsigned int count)
 {
-	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	int new_line = 0;
 	int i;
 	int bytes_to_send = count;
+	int fifo_depth = DEF_FIFO_DEPTH_WORDS;
+	int tx_wm = DEF_TX_WM;
 
 	for (i = 0; i < count; i++) {
 		if (s[i] == '\n')
@@ -369,14 +565,13 @@
 	}
 
 	bytes_to_send += new_line;
-	se_config_packing(uport->membase, 8, 1, false);
-	geni_write_reg_nolog(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, bytes_to_send);
 	i = 0;
 	while (i < count) {
 		u32 chars_to_write = 0;
-		u32 avail_fifo_bytes = (port->tx_fifo_depth - port->tx_wm);
+		u32 avail_fifo_bytes = (fifo_depth - tx_wm);
 		/*
 		 * If the WM bit never set, then the Tx state machine is not
 		 * in a valid state, so break, cancel/abort any existing
@@ -409,11 +604,9 @@
 
 	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
 
-	port = get_port_from_line(co->index);
-	if (IS_ERR_OR_NULL(port)) {
-		pr_err("%s:Invalid line %d\n", __func__, co->index);
+	port = get_port_from_line(co->index, true);
+	if (IS_ERR_OR_NULL(port))
 		return;
-	}
 
 	uport = &port->uport;
 	spin_lock(&uport->lock);
@@ -432,7 +625,6 @@
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 
 	tport = &uport->state->port;
-
 	for (i = 0; i < rx_fifo_wc; i++) {
 		int bytes = 4;
 
@@ -471,23 +663,35 @@
 static void msm_geni_serial_start_tx(struct uart_port *uport)
 {
 	unsigned int geni_m_irq_en;
-	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	if (!msm_geni_serial_tx_empty(uport))
+		return;
+
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+		dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+		return;
+	}
 
 	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
 
-	se_config_packing(uport->membase, 8, 4, false);
-	geni_write_reg_nolog(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(msm_port->tx_wm, uport->membase,
 						SE_GENI_TX_WATERMARK_REG);
 	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/* Geni command setup/irq enables should complete before returning.*/
 	mb();
+	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 }
 
 static void msm_geni_serial_stop_tx(struct uart_port *uport)
 {
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+		return;
 
 	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
@@ -510,21 +714,34 @@
 							SE_GENI_M_IRQ_CLEAR);
 	}
 	geni_write_reg_nolog(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
 }
 
 static void msm_geni_serial_start_rx(struct uart_port *uport)
 {
 	unsigned int geni_s_irq_en;
 	unsigned int geni_m_irq_en;
+	unsigned long cfg0, cfg1;
+	unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+	unsigned int geni_status;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	msm_geni_serial_abort_rx(uport);
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+		return;
+
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	if (geni_status & S_GENI_CMD_ACTIVE)
+		msm_geni_serial_abort_rx(uport);
 	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
 	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_EN);
 	geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
 	geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
-
+	se_get_packing_config(8, 4, false, &cfg0, &cfg1);
+	geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_RX_PACKING_CFG0);
+	geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_RX_PACKING_CFG1);
+	geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
 	geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
 	geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
 	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
@@ -533,6 +750,7 @@
 	 * go through.
 	 */
 	mb();
+	IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
 }
 
 static void msm_geni_serial_stop_rx(struct uart_port *uport)
@@ -541,6 +759,9 @@
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
 
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+		return;
+
 	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
 	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
@@ -586,6 +807,8 @@
 	}
 	uport->icount.rx += ret;
 	tty_flip_buffer_push(tport);
+	dump_ipc(msm_port->ipc_log_rx, "Rx", (char *)msm_port->rx_fifo, 0,
+								rx_bytes);
 	return ret;
 }
 
@@ -597,7 +820,7 @@
 	unsigned int rx_last_byte_valid = 0;
 	unsigned int rx_last = 0;
 	struct tty_port *tport;
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	tport = &uport->state->port;
 	rx_fifo_status = geni_read_reg_nolog(uport->membase,
@@ -607,7 +830,7 @@
 						RX_LAST_BYTE_VALID_SHFT);
 	rx_last = rx_fifo_status & RX_LAST;
 	if (rx_fifo_wc)
-		msm_port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
+		port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
 								rx_last);
 	return ret;
 }
@@ -622,7 +845,8 @@
 	int i = 0;
 	unsigned int tx_fifo_status;
 	unsigned int xmit_size;
-	unsigned int fifo_width_bytes = msm_port->tx_fifo_width >> 3;
+	unsigned int fifo_width_bytes =
+		(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
 
 	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
@@ -645,6 +869,8 @@
 	msm_geni_serial_setup_tx(uport, xmit_size);
 
 	bytes_remaining = xmit_size;
+	dump_ipc(msm_port->ipc_log_tx, "Tx", (char *)&xmit->buf[xmit->tail], 0,
+								xmit_size);
 	while (i < xmit_size) {
 		unsigned int tx_bytes;
 		unsigned int buf = 0;
@@ -664,6 +890,8 @@
 		wmb();
 	}
 	msm_geni_serial_poll_cancel_tx(uport);
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(uport);
 exit_handle_tx:
 	return ret;
 }
@@ -676,12 +904,18 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&uport->lock, flags);
+	if (uart_console(uport) && uport->suspended)
+		goto exit_geni_serial_isr;
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+		goto exit_geni_serial_isr;
 	m_irq_status = geni_read_reg_nolog(uport->membase,
-							SE_GENI_M_IRQ_STATUS);
+						SE_GENI_M_IRQ_STATUS);
 	s_irq_status = geni_read_reg_nolog(uport->membase,
-							SE_GENI_S_IRQ_STATUS);
-	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+						SE_GENI_S_IRQ_STATUS);
+	geni_write_reg_nolog(m_irq_status, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase,
+						SE_GENI_S_IRQ_CLEAR);
 
 	if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
 		WARN_ON(1);
@@ -701,6 +935,28 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t msm_geni_wakeup_isr(int isr, void *dev)
+{
+	struct uart_port *uport = dev;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	struct tty_struct *tty;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uport->lock, flags);
+	if (port->wakeup_byte) {
+		tty = uport->state->port.tty;
+		tty_insert_flip_char(tty->port, port->wakeup_byte, TTY_NORMAL);
+		IPC_LOG_MSG(port->ipc_log_rx, "%s: Inject 0x%x\n",
+					__func__, port->wakeup_byte);
+		tty_flip_buffer_push(tty->port);
+	}
+	__pm_wakeup_event(&port->geni_wake, WAKEBYTE_TIMEOUT_MSEC);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s:Holding Wake Lock for %d ms\n",
+					__func__, WAKEBYTE_TIMEOUT_MSEC);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	return IRQ_HANDLED;
+}
+
 static int get_tx_fifo_size(struct msm_geni_serial_port *port)
 {
 	struct uart_port *uport;
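The wakeup ISR added above injects the configured wakeup byte straight into the line discipline so userspace sees the character that triggered the resume. The injection itself reduces to the standard tty flip-buffer calls (a sketch; the helper name is illustrative):

#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void inject_wakeup_byte(struct uart_port *uport, unsigned char c)
{
	struct tty_port *tport = &uport->state->port;

	tty_insert_flip_char(tport, c, TTY_NORMAL);	/* queue the byte */
	tty_flip_buffer_push(tport);			/* hand it to the ldisc */
}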
@@ -719,7 +975,7 @@
 	port->tx_fifo_width = get_tx_fifo_width(uport->membase);
 	if (!port->tx_fifo_width) {
 		dev_err(uport->dev, "%s:Invalid TX FIFO width read\n",
-								 __func__);
+								__func__);
 		return -ENXIO;
 	}
 
@@ -755,33 +1011,89 @@
 	msm_geni_serial_stop_rx(uport);
 	disable_irq(uport->irq);
 	free_irq(uport->irq, msm_port);
-	if (uart_console(uport))
+	if (uart_console(uport)) {
 		se_geni_resources_off(&msm_port->serial_rsc);
-	else
+	} else {
+		if (msm_port->wakeup_irq > 0) {
+			disable_irq(msm_port->wakeup_irq);
+			free_irq(msm_port->wakeup_irq, msm_port);
+		}
+		__pm_relax(&msm_port->geni_wake);
 		msm_geni_serial_power_off(uport);
+	}
+	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 }
 
 static int msm_geni_serial_port_setup(struct uart_port *uport)
 {
 	int ret = 0;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	unsigned long cfg0, cfg1;
 
-	/* For now only assume FIFO mode. */
-	msm_port->xfer_mode = FIFO_MODE;
+
 	set_rfr_wm(msm_port);
-	ret = geni_se_init(uport->membase, msm_port->xfer_mode,
+	if (!uart_console(uport)) {
+		/* For now only assume FIFO mode. */
+		msm_port->xfer_mode = FIFO_MODE;
+		ret = geni_se_init(uport->membase, msm_port->xfer_mode,
 					msm_port->rx_wm, msm_port->rx_rfr);
-	if (ret) {
-		dev_err(uport->dev, "%s: Fail\n", __func__);
-		goto exit_portsetup;
+		if (ret) {
+			dev_err(uport->dev, "%s: Fail\n", __func__);
+			goto exit_portsetup;
+		}
+		se_get_packing_config(8, 4, false, &cfg0, &cfg1);
+		geni_write_reg_nolog(cfg0, uport->membase,
+						SE_GENI_TX_PACKING_CFG0);
+		geni_write_reg_nolog(cfg1, uport->membase,
+						SE_GENI_TX_PACKING_CFG1);
 	}
-
 	msm_port->port_setup = true;
 	/*
 	 * Ensure Port setup related IO completes before returning to
 	 * framework.
 	 */
 	mb();
+	if (!uart_console(uport)) {
+		char name[30];
+
+		memset(name, 0, sizeof(name));
+		if (!msm_port->ipc_log_rx) {
+			scnprintf(name, sizeof(name), "%s%s",
+					dev_name(uport->dev), "_rx");
+			msm_port->ipc_log_rx = ipc_log_context_create(
+					IPC_LOG_TX_RX_PAGES, name, 0);
+			if (!msm_port->ipc_log_rx)
+				dev_info(uport->dev, "Err in Rx IPC Log\n");
+		}
+		memset(name, 0, sizeof(name));
+		if (!msm_port->ipc_log_tx) {
+			scnprintf(name, sizeof(name), "%s%s",
+					dev_name(uport->dev), "_tx");
+			msm_port->ipc_log_tx = ipc_log_context_create(
+					IPC_LOG_TX_RX_PAGES, name, 0);
+			if (!msm_port->ipc_log_tx)
+				dev_info(uport->dev, "Err in Tx IPC Log\n");
+		}
+		memset(name, 0, sizeof(name));
+		if (!msm_port->ipc_log_pwr) {
+			scnprintf(name, sizeof(name), "%s%s",
+					dev_name(uport->dev), "_pwr");
+			msm_port->ipc_log_pwr = ipc_log_context_create(
+					IPC_LOG_PWR_PAGES, name, 0);
+			if (!msm_port->ipc_log_pwr)
+				dev_info(uport->dev, "Err in Pwr IPC Log\n");
+		}
+		memset(name, 0, sizeof(name));
+		if (!msm_port->ipc_log_misc) {
+			scnprintf(name, sizeof(name), "%s%s",
+					dev_name(uport->dev), "_misc");
+			msm_port->ipc_log_misc = ipc_log_context_create(
+					IPC_LOG_MISC_PAGES, name, 0);
+			if (!msm_port->ipc_log_misc)
+				dev_info(uport->dev, "Err in Misc IPC Log\n");
+		}
+
+	}
 exit_portsetup:
 	return ret;
 }
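Port setup above also creates four ipc_logging contexts (rx, tx, pwr, misc) so transfer and power activity can be traced without flooding dmesg. A minimal sketch of that API as it is used here, assuming the MSM ipc_logging interface:

#include <linux/device.h>
#include <linux/ipc_logging.h>

static void *my_log_ctx;

static void my_log_init(struct device *dev)
{
	/* 2 pages of ring buffer, named after the device, user version 0 */
	my_log_ctx = ipc_log_context_create(2, dev_name(dev), 0);
	if (my_log_ctx)
		ipc_log_string(my_log_ctx, "%s: logging ready\n", __func__);
}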
@@ -802,12 +1114,36 @@
 		goto exit_startup;
 	}
 
+	if (msm_port->wakeup_irq > 0) {
+		ret = request_threaded_irq(msm_port->wakeup_irq, NULL,
+				msm_geni_wakeup_isr,
+				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				"hs_uart_wakeup", uport);
+		if (unlikely(ret)) {
+			dev_err(uport->dev, "%s:Failed to get WakeIRQ ret%d\n",
+								__func__, ret);
+			goto exit_startup;
+		}
+		disable_irq(msm_port->wakeup_irq);
+	}
+
 	if (likely(!uart_console(uport))) {
 		ret = msm_geni_serial_power_on(&msm_port->uport);
 		if (ret)
 			goto exit_startup;
 	}
 
+	if (unlikely(get_se_proto(uport->membase) != UART)) {
+		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
+				 __func__, get_se_proto(uport->membase));
+		ret = -ENXIO;
+		disable_irq(uport->irq);
+		free_irq(uport->irq, msm_port);
+		goto exit_startup;
+	}
+
 	if (!msm_port->port_setup) {
 		if (msm_geni_serial_port_setup(uport))
 			goto exit_startup;
@@ -820,14 +1156,15 @@
 	 * before returning to the framework.
 	 */
 	mb();
+	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 exit_startup:
 	return ret;
 }
 
-static int get_dfs_index(unsigned long clk_freq, unsigned long *ser_clk)
+static int get_clk_cfg(unsigned long clk_freq, unsigned long *ser_clk)
 {
-	unsigned long root_freq[] = {19200000, 7372800, 64000000,
-			96000000, 100000000, 102400000, 128000000};
+	unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+		32000000, 48000000, 64000000, 80000000, 96000000, 100000000};
 	int i;
 	int match = -1;
 
@@ -842,13 +1179,15 @@
 	}
 	if (match != -1)
 		*ser_clk = root_freq[match];
+	else
+		pr_err("clk_freq %ld\n", clk_freq);
 	return match;
 }
 
 static void geni_serial_write_term_regs(struct uart_port *uport, u32 loopback,
 		u32 tx_trans_cfg, u32 tx_parity_cfg, u32 rx_trans_cfg,
 		u32 rx_parity_cfg, u32 bits_per_char, u32 stop_bit_len,
-		u32 rxstale, u32 s_clk_cfg)
+		u32 s_clk_cfg)
 {
 	geni_write_reg_nolog(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
 	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
@@ -865,7 +1204,6 @@
 							SE_UART_RX_WORD_LEN);
 	geni_write_reg_nolog(stop_bit_len, uport->membase,
 						SE_UART_TX_STOP_BIT_LEN);
-	geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
 }
@@ -877,8 +1215,8 @@
 	int clk_div = 0;
 
 	*desired_clk_rate = baud * UART_OVERSAMPLING;
-	dfs_index = get_dfs_index(*desired_clk_rate, &ser_clk);
-	if (dfs_index < 1) {
+	dfs_index = get_clk_cfg(*desired_clk_rate, &ser_clk);
+	if (dfs_index < 0) {
 		pr_err("%s: Can't find matching DFS entry for baud %d\n",
 								__func__, baud);
 		clk_div = -EINVAL;
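get_clk_cfg() above picks the first root frequency that divides evenly into baud times the 32x oversampling factor, and get_clk_div_rate() then derives the divider from it. A worked example for the common console rate (values assumed):

/* 115200 baud * 32x oversampling = 3686400 Hz desired SE clock.
 * 7372800 Hz from the root table divides it evenly, so clk_div = 2. */
static unsigned int example_clk_div(void)
{
	unsigned long desired = 115200UL * 32;	/* 3686400 */
	unsigned long ser_clk = 7372800;

	return ser_clk / desired;		/* 2 */
}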
@@ -901,7 +1239,6 @@
 	unsigned int rx_trans_cfg;
 	unsigned int rx_parity_cfg;
 	unsigned int stop_bit_len;
-	unsigned int rxstale;
 	unsigned int clk_div;
 	unsigned long ser_clk_cfg = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
@@ -909,6 +1246,7 @@
 
 	/* baud rate */
 	baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+	port->cur_baud = baud;
 	clk_div = get_clk_div_rate(baud, &clk_rate);
 	if (clk_div <= 0)
 		goto exit_set_termios;
@@ -966,8 +1304,6 @@
 		break;
 	}
 
-	/* stale timer, set this to 16 characters. */
-	rxstale = bits_per_char * STALE_TIMEOUT;
 
 	/* stop bits */
 	if (termios->c_cflag & CSTOPB)
@@ -984,19 +1320,26 @@
 
 	geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
 		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
-		stop_bit_len, rxstale, ser_clk_cfg);
+		stop_bit_len, ser_clk_cfg);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
+	IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg%d parity %d\n",
+						tx_trans_cfg, tx_parity_cfg);
+	IPC_LOG_MSG(port->ipc_log_misc, "Rx: trans_cfg%d parity %d",
+						rx_trans_cfg, rx_parity_cfg);
+	IPC_LOG_MSG(port->ipc_log_misc, "BitsChar%d stop bit%d\n",
+				bits_per_char, stop_bit_len);
 exit_set_termios:
 	return;
 
 }
 
-static unsigned int msm_geni_serial_tx_empty(struct uart_port *port)
+static unsigned int msm_geni_serial_tx_empty(struct uart_port *uport)
 {
 	unsigned int tx_fifo_status;
 	unsigned int is_tx_empty = 1;
 
-	tx_fifo_status = geni_read_reg_nolog(port->membase,
-						SE_GENI_TX_FIFO_STATUS);
+	tx_fifo_status = geni_read_reg_nolog(uport->membase,
+					SE_GENI_TX_FIFO_STATUS);
 	if (tx_fifo_status)
 		is_tx_empty = 0;
 
@@ -1013,11 +1356,12 @@
 	int parity = 'n';
 	int flow = 'n';
 	int ret = 0;
+	unsigned long cfg0, cfg1;
 
 	if (unlikely(co->index >= GENI_UART_NR_PORTS  || co->index < 0))
 		return -ENXIO;
 
-	dev_port = get_port_from_line(co->index);
+	dev_port = get_port_from_line(co->index, true);
 	if (IS_ERR_OR_NULL(dev_port)) {
 		ret = PTR_ERR(dev_port);
 		pr_err("Invalid line %d(%d)\n", co->index, ret);
@@ -1045,6 +1389,9 @@
 	 * it else we could end up in data loss scenarios.
 	 */
 	msm_geni_serial_poll_cancel_tx(uport);
+	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+	geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
+	geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
@@ -1066,7 +1413,6 @@
 {
 	struct uart_port *uport = &dev->port;
 	int ret = 0;
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	u32 tx_trans_cfg = 0;
 	u32 tx_parity_cfg = 0;
 	u32 rx_trans_cfg = 0;
@@ -1078,6 +1424,7 @@
 	u32 baud = 115200;
 	u32 clk_div;
 	unsigned long clk_rate;
+	unsigned long cfg0, cfg1;
 
 	if (!uport->membase) {
 		ret = -ENOMEM;
@@ -1089,13 +1436,8 @@
 		goto exit_geni_serial_earlyconsetup;
 	}
 
-	msm_port->xfer_mode = FIFO_MODE;
-	set_rfr_wm(msm_port);
-	msm_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
-	msm_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
-	msm_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
-	geni_se_init(uport->membase, msm_port->xfer_mode, msm_port->rx_wm,
-							msm_port->rx_rfr);
+	geni_se_init(uport->membase, FIFO_MODE, (DEF_FIFO_DEPTH_WORDS >> 1),
+						(DEF_FIFO_DEPTH_WORDS - 2));
 	/*
 	 * Ignore Flow control.
 	 * Disable Tx Parity.
@@ -1126,9 +1468,24 @@
 	 * it else we could end up in data loss scenarios.
 	 */
 	msm_geni_serial_poll_cancel_tx(uport);
-	geni_serial_write_term_regs(uport, 0, tx_trans_cfg,
-		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
-		stop_bit, rx_stale, s_clk_cfg);
+	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+	geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
+	geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
+	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
+							SE_UART_TX_TRANS_CFG);
+	geni_write_reg_nolog(tx_parity_cfg, uport->membase,
+							SE_UART_TX_PARITY_CFG);
+	geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+							SE_UART_RX_TRANS_CFG);
+	geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+							SE_UART_RX_PARITY_CFG);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_TX_WORD_LEN);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_RX_WORD_LEN);
+	geni_write_reg_nolog(stop_bit, uport->membase, SE_UART_TX_STOP_BIT_LEN);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
 
 	dev->con->write = msm_geni_serial_early_console_write;
 	dev->con->setup = NULL;
@@ -1189,6 +1546,23 @@
 		dev_err(uport->dev, "Failed to create dbg dir\n");
 }
 
+static const struct uart_ops msm_geni_console_pops = {
+	.tx_empty = msm_geni_serial_tx_empty,
+	.stop_tx = msm_geni_serial_stop_tx,
+	.start_tx = msm_geni_serial_start_tx,
+	.stop_rx = msm_geni_serial_stop_rx,
+	.set_termios = msm_geni_serial_set_termios,
+	.startup = msm_geni_serial_startup,
+	.config_port = msm_geni_serial_config_port,
+	.shutdown = msm_geni_serial_shutdown,
+	.type = msm_geni_serial_get_type,
+	.set_mctrl = msm_geni_cons_set_mctrl,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char	= msm_geni_serial_get_char,
+	.poll_put_char	= msm_geni_serial_poll_put_char,
+#endif
+};
+
 static const struct uart_ops msm_geni_serial_pops = {
 	.tx_empty = msm_geni_serial_tx_empty,
 	.stop_tx = msm_geni_serial_stop_tx,
@@ -1200,10 +1574,10 @@
 	.shutdown = msm_geni_serial_shutdown,
 	.type = msm_geni_serial_get_type,
 	.set_mctrl = msm_geni_serial_set_mctrl,
-#ifdef CONFIG_CONSOLE_POLL
-	.poll_get_char	= msm_geni_serial_get_char,
-	.poll_put_char	= msm_geni_serial_poll_put_char,
-#endif
+	.get_mctrl = msm_geni_serial_get_mctrl,
+	.break_ctl = msm_geni_serial_break_ctl,
+	.flush_buffer = NULL,
+	.ioctl = msm_geni_serial_ioctl,
 };
 
 static const struct of_device_id msm_geni_device_tbl[] = {
@@ -1225,17 +1599,7 @@
 	struct resource *res;
 	struct uart_driver *drv;
 	const struct of_device_id *id;
-
-	if (pdev->dev.of_node)
-		line = of_alias_get_id(pdev->dev.of_node, "serial");
-	else
-		line = pdev->id;
-
-	if (line < 0)
-		line = atomic_inc_return(&uart_line_id) - 1;
-
-	if ((line < 0) || (line >= GENI_UART_NR_PORTS))
-		return -ENXIO;
+	bool is_console = false;
 
 	id = of_match_device(msm_geni_device_tbl, &pdev->dev);
 	if (id) {
@@ -1246,7 +1610,22 @@
 		return -ENODEV;
 	}
 
-	dev_port = get_port_from_line(line);
+	if (pdev->dev.of_node) {
+		if (drv->cons)
+			line = of_alias_get_id(pdev->dev.of_node, "serial");
+		else
+			line = of_alias_get_id(pdev->dev.of_node, "hsuart");
+	} else {
+		line = pdev->id;
+	}
+
+	if (line < 0)
+		line = atomic_inc_return(&uart_line_id) - 1;
+
+	if ((line < 0) || (line >= GENI_UART_NR_PORTS))
+		return -ENXIO;
+	is_console = (drv->cons ? true : false);
+	dev_port = get_port_from_line(line, is_console);
 	if (IS_ERR_OR_NULL(dev_port)) {
 		ret = PTR_ERR(dev_port);
 		dev_err(&pdev->dev, "Invalid line %d(%d)\n",
@@ -1279,9 +1658,13 @@
 		dev_port->serial_rsc.ab = UART_CORE2X_VOTE;
 		dev_port->serial_rsc.ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;
 	} else {
-		dev_info(&pdev->dev, "No bus master specified");
+		dev_info(&pdev->dev, "No bus master specified\n");
 	}
 
+	if (of_property_read_u8(pdev->dev.of_node, "qcom,wakeup-byte",
+					&dev_port->wakeup_byte))
+		dev_info(&pdev->dev, "No Wakeup byte specified\n");
+
 	dev_port->serial_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
 	if (IS_ERR(dev_port->serial_rsc.se_clk)) {
 		ret = PTR_ERR(dev_port->serial_rsc.se_clk);
@@ -1342,6 +1725,7 @@
 		goto exit_geni_serial_probe;
 	}
 
+	wakeup_source_init(&dev_port->geni_wake, dev_name(&pdev->dev));
 	dev_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
@@ -1355,9 +1739,14 @@
 		goto exit_geni_serial_probe;
 	}
 
+	/* Optional to use the Rx pin as wakeup irq */
+	dev_port->wakeup_irq = platform_get_irq(pdev, 1);
+	if (dev_port->wakeup_irq < 0 && !is_console)
+		dev_info(&pdev->dev, "No wakeup IRQ configured\n");
+
 	uport->private_data = (void *)drv;
 	platform_set_drvdata(pdev, dev_port);
-	if (drv->cons) {
+	if (is_console) {
 		dev_port->handle_rx = handle_rx_console;
 		dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32),
 								GFP_KERNEL);
@@ -1366,13 +1755,11 @@
 		dev_port->rx_fifo = devm_kzalloc(uport->dev,
 				sizeof(dev_port->rx_fifo_depth * sizeof(u32)),
 								GFP_KERNEL);
-		pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
-		pm_runtime_use_autosuspend(&pdev->dev);
 		pm_runtime_enable(&pdev->dev);
 	}
 
 	dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
-				line, uport->fifosize, (drv->cons ? 1 : 0));
+				line, uport->fifosize, is_console);
 	device_create_file(uport->dev, &dev_attr_loopback);
 	msm_geni_serial_debug_init(uport);
 	dev_port->port_setup = false;
@@ -1388,6 +1775,7 @@
 	struct uart_driver *drv =
 			(struct uart_driver *)port->uport.private_data;
 
+	wakeup_source_trash(&port->geni_wake);
 	uart_remove_one_port(drv, &port->uport);
 	msm_bus_scale_unregister(port->serial_rsc.bus_bw);
 	return 0;
@@ -1399,16 +1787,38 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+	int ret = 0;
 
-	return se_geni_resources_off(&port->serial_rsc);
+	ret = se_geni_resources_off(&port->serial_rsc);
+	if (ret) {
+		dev_err(dev, "%s: Error ret %d\n", __func__, ret);
+		goto exit_runtime_suspend;
+	}
+	if (port->wakeup_irq > 0)
+		enable_irq(port->wakeup_irq);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
+				atomic_read(&dev->power.usage_count));
+exit_runtime_suspend:
+	return ret;
 }
 
 static int msm_geni_serial_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+	int ret = 0;
 
-	return se_geni_resources_on(&port->serial_rsc);
+	if (port->wakeup_irq > 0)
+		disable_irq(port->wakeup_irq);
+	ret = se_geni_resources_on(&port->serial_rsc);
+	if (ret) {
+		dev_err(dev, "%s: Error ret %d\n", __func__, ret);
+		goto exit_runtime_resume;
+	}
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
+				atomic_read(&dev->power.usage_count));
+exit_runtime_resume:
+	return ret;
 }
 
 static int msm_geni_serial_sys_suspend_noirq(struct device *dev)
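The runtime-PM hooks above arm the RX-pad wake interrupt only while the serial engine's resources are off, and disarm it again on resume before the normal RX path takes over. The arm/disarm pairing in isolation (a sketch; the struct and clock names are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>

struct my_uart {
	struct clk *se_clk;
	int wakeup_irq;
};

static int my_uart_runtime_suspend(struct device *dev)
{
	struct my_uart *port = dev_get_drvdata(dev);

	clk_disable_unprepare(port->se_clk);	/* drop SE resources */
	if (port->wakeup_irq > 0)
		enable_irq(port->wakeup_irq);	/* listen for RX activity */
	return 0;
}

static int my_uart_runtime_resume(struct device *dev)
{
	struct my_uart *port = dev_get_drvdata(dev);

	if (port->wakeup_irq > 0)
		disable_irq(port->wakeup_irq);	/* real RX path takes over */
	return clk_prepare_enable(port->se_clk);
}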
@@ -1435,9 +1845,11 @@
 	struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
 	struct uart_port *uport = &port->uport;
 
-	if (uart_console(uport))
+	if (uart_console(uport)) {
+		se_geni_resources_on(&port->serial_rsc);
 		uart_resume_port((struct uart_driver *)uport->private_data,
 									uport);
+	}
 	return 0;
 }
 #else
@@ -1504,6 +1916,13 @@
 		msm_geni_serial_ports[i].uport.line = i;
 	}
 
+	for (i = 0; i < GENI_UART_CONS_PORTS; i++) {
+		msm_geni_console_port.uport.iotype = UPIO_MEM;
+		msm_geni_console_port.uport.ops = &msm_geni_console_pops;
+		msm_geni_console_port.uport.flags = UPF_BOOT_AUTOCONF;
+		msm_geni_console_port.uport.line = i;
+	}
+
 	ret = console_register(&msm_geni_console_driver);
 	if (ret)
 		return ret;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0dfe271..62574bf 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -301,9 +301,6 @@
 	 */
 	if (dwc->ep0_bounced && dep->number <= 1) {
 		dwc->ep0_bounced = false;
-
-		usb_gadget_unmap_request_by_dev(dwc->sysdev,
-				&req->request, req->direction);
 		unmap_after_complete = true;
 	} else {
 		usb_gadget_unmap_request(&dwc->gadget,
@@ -1454,9 +1451,6 @@
 		unsigned transfer_in_flight;
 		unsigned started;
 
-		if (dep->flags & DWC3_EP_STALL)
-			return 0;
-
 		if (dep->number > 1)
 			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
 		else
@@ -1481,8 +1475,6 @@
 		else
 			dep->flags |= DWC3_EP_STALL;
 	} else {
-		if (!(dep->flags & DWC3_EP_STALL))
-			return 0;
 
 		ret = dwc3_send_clear_stall_ep_cmd(dep);
 		if (ret)
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index a2a9185..51ab794 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/usb/audio.h>
 #include <linux/wait.h>
+#include <linux/pm_qos.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
@@ -268,6 +269,8 @@
 	/* number of frames sent since start_time */
 	s64				frames_sent;
 	struct audio_source_config	*config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
 };
 
 static inline struct audio_dev *func_to_audio(struct usb_function *f)
@@ -740,6 +743,10 @@
 	runtime->hw.channels_max = 2;
 
 	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
 	return 0;
 }
 
@@ -749,6 +756,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	pm_qos_remove_request(&audio->pm_qos);
+
 	audio->substream = NULL;
 	spin_unlock_irqrestore(&audio->lock, flags);
 
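The f_audio_source change above pins CPU DMA latency to 0 for the lifetime of an open PCM substream so deep idle states do not starve the isochronous stream, and drops the request on close. The pm_qos pairing in isolation, as a sketch:

#include <linux/pm_qos.h>

static struct pm_qos_request my_pm_qos;

static void my_stream_open(void)
{
	/* request zero added latency while streaming */
	pm_qos_add_request(&my_pm_qos, PM_QOS_CPU_DMA_LATENCY, 0);
}

static void my_stream_close(void)
{
	pm_qos_remove_request(&my_pm_qos);	/* let CPUs idle deeply again */
}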
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index aaa0fc2..af1bca6 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -47,6 +47,7 @@
 #define MTP_BULK_BUFFER_SIZE       16384
 #define INTR_BUFFER_SIZE           28
 #define MAX_INST_NAME_LEN          40
+#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX	0
@@ -837,7 +838,12 @@
 		if (hdr_size) {
 			/* prepend MTP data header */
 			header = (struct mtp_data_header *)req->buf;
-			header->length = __cpu_to_le32(count);
+			/*
+			 * Set the file size in the data header according to
+			 * the MTP Specification v1.0.
+			 */
+			header->length = (count > MTP_MAX_FILE_SIZE) ?
+				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
 			header->type = __cpu_to_le16(2); /* data packet */
 			header->command = __cpu_to_le16(dev->xfer_command);
 			header->transaction_id =
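The f_mtp change above clamps the data-header length field: MTP carries object sizes in a 32-bit field, and 0xFFFFFFFF is the conventional marker for objects of 4 GiB or larger. The clamp in isolation (a sketch; the helper name is illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

static __le32 mtp_header_length(loff_t count)
{
	/* anything that does not fit in 32 bits is reported as 0xFFFFFFFF */
	return (count > 0xFFFFFFFFLL) ? cpu_to_le32(0xFFFFFFFF)
				      : cpu_to_le32(count);
}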
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e643c7..18dc18f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -315,7 +315,32 @@
 	struct page **pages;
 	pgoff_t next_index;
 	int nr_pages = 0;
-	int ret;
+	int got = 0;
+	int ret = 0;
+
+	if (!current->journal_info) {
+		/* caller of readpages does not hold buffer and read caps
+		 * (fadvise, madvise and readahead cases) */
+		int want = CEPH_CAP_FILE_CACHE;
+		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+		if (ret < 0) {
+			dout("start_read %p, error getting cap\n", inode);
+		} else if (!(got & want)) {
+			dout("start_read %p, no cache cap\n", inode);
+			ret = 0;
+		}
+		if (ret <= 0) {
+			if (got)
+				ceph_put_cap_refs(ci, got);
+			while (!list_empty(page_list)) {
+				page = list_entry(page_list->prev,
+						  struct page, lru);
+				list_del(&page->lru);
+				put_page(page);
+			}
+			return ret;
+		}
+	}
 
 	off = (u64) page_offset(page);
 
@@ -338,15 +363,18 @@
 				    CEPH_OSD_FLAG_READ, NULL,
 				    ci->i_truncate_seq, ci->i_truncate_size,
 				    false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
 
 	/* build page vector */
 	nr_pages = calc_pages_for(0, len);
 	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!pages)
-		goto out;
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
 	for (i = 0; i < nr_pages; ++i) {
 		page = list_entry(page_list->prev, struct page, lru);
 		BUG_ON(PageLocked(page));
@@ -379,6 +407,12 @@
 	if (ret < 0)
 		goto out_pages;
 	ceph_osdc_put_request(req);
+
+	/* After adding locked pages to page cache, the inode holds cache cap.
+	 * So we can drop our cap refs. */
+	if (got)
+		ceph_put_cap_refs(ci, got);
+
 	return nr_pages;
 
 out_pages:
@@ -387,8 +421,11 @@
 		unlock_page(pages[i]);
 	}
 	ceph_put_page_vector(pages, nr_pages, false);
-out:
+out_put:
 	ceph_osdc_put_request(req);
+out:
+	if (got)
+		ceph_put_cap_refs(ci, got);
 	return ret;
 }
 
@@ -425,7 +462,6 @@
 		rc = start_read(inode, page_list, max);
 		if (rc < 0)
 			goto out;
-		BUG_ON(rc == 0);
 	}
 out:
 	ceph_fscache_readpages_cancel(inode, page_list);
@@ -1372,9 +1408,11 @@
 	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
-	    ci->i_inline_version == CEPH_INLINE_NONE)
+	    ci->i_inline_version == CEPH_INLINE_NONE) {
+		current->journal_info = vma->vm_file;
 		ret = filemap_fault(vma, vmf);
-	else
+		current->journal_info = NULL;
+	} else
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f3f2110..03951f9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2479,6 +2479,27 @@
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
 
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+{
+	int ret, err = 0;
+
+	BUG_ON(need & ~CEPH_CAP_FILE_RD);
+	BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+	ret = ceph_pool_perm_check(ci, need);
+	if (ret < 0)
+		return ret;
+
+	ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+	if (ret) {
+		if (err == -EAGAIN) {
+			ret = 0;
+		} else if (err < 0) {
+			ret = err;
+		}
+	}
+	return ret;
+}
+
 /*
  * Wait for caps, and take cap references.  If we can't get a WR cap
  * due to a small max_size, make sure we check_max_size (and possibly
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index f995e35..ca3f630 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1249,8 +1249,9 @@
 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 		     ceph_cap_string(got));
-
+		current->journal_info = filp;
 		ret = generic_file_read_iter(iocb, to);
+		current->journal_info = NULL;
 	}
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 12f2252..953275b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2080,11 +2080,6 @@
 	if (inode_dirty_flags)
 		__mark_inode_dirty(inode, inode_dirty_flags);
 
-	if (ia_valid & ATTR_MODE) {
-		err = posix_acl_chmod(inode, attr->ia_mode);
-		if (err)
-			goto out_put;
-	}
 
 	if (mask) {
 		req->r_inode = inode;
@@ -2098,13 +2093,11 @@
 	     ceph_cap_string(dirtied), mask);
 
 	ceph_mdsc_put_request(req);
-	if (mask & CEPH_SETATTR_SIZE)
+	ceph_free_cap_flush(prealloc_cf);
+
+	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
 		__ceph_do_pending_vmtruncate(inode);
-	ceph_free_cap_flush(prealloc_cf);
-	return err;
-out_put:
-	ceph_mdsc_put_request(req);
-	ceph_free_cap_flush(prealloc_cf);
+
 	return err;
 }
 
@@ -2123,7 +2116,12 @@
 	if (err != 0)
 		return err;
 
-	return __ceph_setattr(inode, attr);
+	err = __ceph_setattr(inode, attr);
+
+	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
+		err = posix_acl_chmod(inode, attr->ia_mode);
+
+	return err;
 }
 
 /*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3e3fa916..622d5dd 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -905,6 +905,8 @@
 
 extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			 loff_t endoff, int *got, struct page **pinned_page);
+extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+			     int need, int want, int *got);
 
 /* for counting open files by mode */
 extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 94661cf..b3830f7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -241,6 +241,7 @@
 	/* verify the message */
 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -1314,12 +1315,19 @@
 	void *callback_data;	  /* general purpose pointer for callback */
 	void *resp_buf;		/* pointer to received SMB header */
 	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command;		/* smb command code */
 	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request  */
 	bool multiEnd:1;	/* both received */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid         fid;
+	struct cifs_tcon        *tcon;
+	struct work_struct      work;
+};
+
 /*	Make code in transport.c a little cleaner by moving
 	update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1451,6 +1459,9 @@
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN		 0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e3fed92..586fdac 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1423,6 +1423,8 @@
 
 	length = discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1534,6 +1536,8 @@
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 893be07..b8015de 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -882,10 +882,19 @@
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3d38348..9730780 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -654,3 +654,47 @@
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 5d456eb..007abf7 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1565,6 +1565,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1645,6 +1646,7 @@
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1727,6 +1729,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1815,6 +1818,7 @@
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index f2d511a..04ef6e9 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@
 			      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					   __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+						__u64 ses_id, __u32  tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -158,6 +162,9 @@
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+					struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index bc9a7b6..390b0d0 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,22 +115,68 @@
 }
 
 static struct cifs_ses *
-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-		if (ses->Suid != smb2hdr->SessionId)
+		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
-	spin_unlock(&cifs_tcp_ses_lock);
 
 	return NULL;
 }
 
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
+	return NULL;
+}
+
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
 
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
@@ -142,7 +188,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
@@ -359,7 +405,7 @@
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 206a597..cc26d41 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -727,9 +727,11 @@
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n",	midQ->mid);
 		send_cancel(ses->server, buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			cifs_small_buf_release(buf);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index dba2ff8..4523346 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -358,6 +358,8 @@
 {
 	unsigned int len, v, hdr, dlen;
 	u32 max_blocksize = svc_max_payload(rqstp);
+	struct kvec *head = rqstp->rq_arg.head;
+	struct kvec *tail = rqstp->rq_arg.tail;
 
 	p = decode_fh(p, &args->fh);
 	if (!p)
@@ -367,6 +369,8 @@
 	args->count = ntohl(*p++);
 	args->stable = ntohl(*p++);
 	len = args->len = ntohl(*p++);
+	if ((void *)p > head->iov_base + head->iov_len)
+		return 0;
 	/*
 	 * The count must equal the amount of data passed.
 	 */
@@ -377,9 +381,8 @@
 	 * Check to make sure that we got the right number of
 	 * bytes.
 	 */
-	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		+ rqstp->rq_arg.tail[0].iov_len - hdr;
+	hdr = (void*)p - head->iov_base;
+	dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
 	/*
 	 * Round the length of the data which was specified up to
 	 * the next multiple of XDR units and then compare that
@@ -396,7 +399,7 @@
 		len = args->len = max_blocksize;
 	}
 	rqstp->rq_vec[0].iov_base = (void*)p;
-	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+	rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
 	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +474,8 @@
 	/* first copy and check from the first page */
 	old = (char*)p;
 	vec = &rqstp->rq_arg.head[0];
+	if ((void *)old > vec->iov_base + vec->iov_len)
+		return 0;
 	avail = vec->iov_len - (old - (char*)vec->iov_base);
 	while (len && avail && *old) {
 		*new++ = *old++;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index a2b65fc..1645b97 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -733,6 +733,37 @@
 	return nfserr;
 }
 
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has an argument and
+ * a reply that can both be larger than a page.  The xdr code has taken
+ * advantage of this assumption to be sloppy about bounds checking in
+ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+				struct svc_procedure *proc)
+{
+	/*
+	 * The ACL code has more careful bounds-checking and is not
+	 * susceptible to this problem:
+	 */
+	if (rqstp->rq_prog != NFS_PROGRAM)
+		return false;
+	/*
+	 * Ditto NFSv4 (which can in theory have argument and reply both
+	 * more than a page):
+	 */
+	if (rqstp->rq_vers >= 4)
+		return false;
+	/* The reply will be small, we're OK: */
+	if (proc->pc_xdrressize > 0 &&
+	    proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+		return false;
+
+	return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
 int
 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 {
@@ -745,6 +776,11 @@
 				rqstp->rq_vers, rqstp->rq_proc);
 	proc = rqstp->rq_procinfo;
 
+	if (nfs_request_too_big(rqstp, proc)) {
+		dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+		*statp = rpc_garbage_args;
+		return 1;
+	}
 	/*
 	 * Give the xdr decoder a chance to change this if it wants
 	 * (necessary in the NFSv4.0 compound case)
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 41b468a..de07ff6 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -280,6 +280,7 @@
 					struct nfsd_writeargs *args)
 {
 	unsigned int len, hdr, dlen;
+	struct kvec *head = rqstp->rq_arg.head;
 	int v;
 
 	p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@
 	 * Check to make sure that we got the right number of
 	 * bytes.
 	 */
-	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		- hdr;
+	hdr = (void*)p - head->iov_base;
+	if (hdr > head->iov_len)
+		return 0;
+	dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
 
 	/*
 	 * Round the length of the data which was specified up to
@@ -316,7 +318,7 @@
 		return 0;
 
 	rqstp->rq_vec[0].iov_base = (void*)p;
-	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+	rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
 	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5cc0a36..c573113 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1506,7 +1506,7 @@
 
 #endif	/* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 
 static int sched_init_task_load_show(struct seq_file *m, void *v)
 {
@@ -3062,7 +3062,7 @@
 #ifdef CONFIG_SMP
 	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
 #endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
 	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
 #endif
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b938fa7..7ec77f8 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -40,6 +40,7 @@
 	short unsigned settime_flags;	/* to show in fdinfo */
 	struct rcu_head rcu;
 	struct list_head clist;
+	spinlock_t cancel_lock;
 	bool might_cancel;
 };
 
@@ -113,7 +114,7 @@
 	rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
 	if (ctx->might_cancel) {
 		ctx->might_cancel = false;
@@ -123,6 +124,13 @@
 	}
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+	spin_lock(&ctx->cancel_lock);
+	__timerfd_remove_cancel(ctx);
+	spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
 	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
@@ -133,6 +141,7 @@
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
 	     ctx->clockid == CLOCK_REALTIME_ALARM ||
 	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
@@ -143,9 +152,10 @@
 			list_add_rcu(&ctx->clist, &cancel_list);
 			spin_unlock(&cancel_lock);
 		}
-	} else if (ctx->might_cancel) {
-		timerfd_remove_cancel(ctx);
+	} else {
+		__timerfd_remove_cancel(ctx);
 	}
+	spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -403,6 +413,7 @@
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
+	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
 	if (isalarm(ctx)) {
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 24dd11e..91ea077 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -55,7 +55,6 @@
 #define DISP_CC_PLL0						38
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				39
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				40
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			41
 
 #define DISP_CC_MDSS_CORE_BCR					0
 #define DISP_CC_MDSS_GCC_CLOCKS_BCR				1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 73a8c0b..e411e8e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -187,6 +187,16 @@
 #define GPLL0_OUT_MAIN						169
 #define GPLL1							170
 #define GPLL1_OUT_MAIN						171
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				172
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				173
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			174
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			175
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			176
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				177
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			178
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			179
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				180
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				181
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index a31fa20..b2907e0 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -27,7 +27,7 @@
 #define RPMH_RF_CLK2_A						9
 #define RPMH_RF_CLK3						10
 #define RPMH_RF_CLK3_A						11
-#define RPMH_QDSS_CLK						12
-#define RPMH_QDSS_A_CLK						13
+#define RPMH_RF_CLK4						12
+#define RPMH_RF_CLK4_A						13
 
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bda14ef..744ea4f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -125,6 +125,7 @@
  * BVEC_POOL_IDX()
  */
 #define BIO_RESET_BITS	10
+#define BIO_INLINECRYPT 15
 
 /*
  * We support 6 different bvec pools, the last one is magic in that it
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index ec7047c..0538291 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -41,13 +41,6 @@
 
 extern struct bus_type coresight_bustype;
 
-enum coresight_clk_rate {
-	CORESIGHT_CLK_RATE_OFF,
-	CORESIGHT_CLK_RATE_TRACE = 1000,
-	CORESIGHT_CLK_RATE_HSTRACE = 2000,
-	CORESIGHT_CLK_RATE_FIXED = 3000,
-};
-
 enum coresight_dev_type {
 	CORESIGHT_DEV_TYPE_NONE,
 	CORESIGHT_DEV_TYPE_SINK,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 1f6892c..e3d181e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -436,6 +436,7 @@
 void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
 int iommu_fwspec_get_id(struct device *dev, u32 *id);
+int iommu_is_available(struct device *dev);
 
 #else /* CONFIG_IOMMU_API */
 
@@ -711,6 +712,10 @@
 	return -ENODEV;
 }
 
+static inline int iommu_is_available(struct device *dev)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
index 8316aaa..b9527d3 100644
--- a/include/linux/msm_pcie.h
+++ b/include/linux/msm_pcie.h
@@ -157,18 +157,6 @@
 int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
 			u32 offset, u32 mask, u32 value);
 
-/*
- * msm_pcie_configure_sid - calculates the SID for a PCIe endpoint.
- * @dev:	device structure
- * @sid:	the calculated SID
- * @domain:	the domain number of the Root Complex
- *
- * This function calculates the SID for a PCIe endpoint device.
- *
- * Return: 0 on success, negative value on error
- */
-int msm_pcie_configure_sid(struct device *dev, u32 *sid,
-			int *domain);
 #else /* !CONFIG_PCI_MSM */
 static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
 			void *user, void *data, u32 options)
@@ -206,12 +194,6 @@
 {
 	return -ENODEV;
 }
-
-static inline int msm_pcie_configure_sid(struct device *dev, u32 *sid,
-			int *domain)
-{
-	return -ENODEV;
-}
 #endif /* CONFIG_PCI_MSM */
 
 #endif /* __MSM_PCIE_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e25f183..bd22670 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -806,6 +806,7 @@
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index cb4387d..12b3d51e8 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -86,6 +86,7 @@
 #define SE_GENI_TX_WATERMARK_REG	(0x80C)
 #define SE_GENI_RX_WATERMARK_REG	(0x810)
 #define SE_GENI_RX_RFR_WATERMARK_REG	(0x814)
+#define SE_GENI_IOS			(0x908)
 #define SE_GENI_M_GP_LENGTH		(0x910)
 #define SE_GENI_S_GP_LENGTH		(0x914)
 #define SE_GSI_EVENT_EN			(0xE18)
@@ -228,6 +229,10 @@
 #define GENI_M_EVENT_EN		(BIT(2))
 #define GENI_S_EVENT_EN		(BIT(3))
 
+/* SE_GENI_IOS fields */
+#define IO2_DATA_IN		(BIT(1))
+#define RX_DATA_IN		(BIT(0))
+
 /* SE_IRQ_EN fields */
 #define DMA_RX_IRQ_EN		(BIT(0))
 #define DMA_TX_IRQ_EN		(BIT(1))
@@ -275,7 +280,7 @@
 static inline void geni_write_reg(unsigned int value, void __iomem *base,
 				int offset)
 {
-	return writel_relaxed(value, (base + offset));
+	writel_relaxed(value, (base + offset));
 }
 
 static inline int get_se_proto(void __iomem *base)
@@ -479,11 +484,11 @@
 	return rx_fifo_depth;
 }
 
-static inline void se_config_packing(void __iomem *base, int bpw,
-				int pack_words, bool msb_to_lsb)
+static inline void se_get_packing_config(int bpw, int pack_words,
+					bool msb_to_lsb, unsigned long *cfg0,
+					unsigned long *cfg1)
 {
 	u32 cfg[4] = {0};
-	unsigned long cfg0, cfg1;
 	int len = ((bpw < 8) ? (bpw - 1) : 7);
 	int idx = ((msb_to_lsb == 1) ? len : 0);
 	int iter = (bpw * pack_words) >> 3;
@@ -495,8 +500,16 @@
 		if (i == iter - 1)
 			cfg[i] |= 1;
 	}
-	cfg0 = cfg[0] | (cfg[1] << 10);
-	cfg1 = cfg[2] | (cfg[3] << 10);
+	*cfg0 = cfg[0] | (cfg[1] << 10);
+	*cfg1 = cfg[2] | (cfg[3] << 10);
+}
+
+static inline void se_config_packing(void __iomem *base, int bpw,
+				int pack_words, bool msb_to_lsb)
+{
+	unsigned long cfg0, cfg1;
+
+	se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
 	geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
 	geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
 	geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index bac5e90..26d1a4c 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -75,8 +75,6 @@
 		int num_of_intents, uint32_t size);
 
 
-#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
-	 defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
 struct apr_svc_ch_dev {
 	void               *handle;
 	spinlock_t         w_lock;
@@ -88,20 +86,5 @@
 	unsigned int       channel_state;
 	bool               if_remote_intent_ready;
 };
-#else
-struct apr_svc_ch_dev {
-	struct smd_channel *ch;
-	spinlock_t         lock;
-	spinlock_t         w_lock;
-	struct mutex       m_lock;
-	apr_svc_cb_fn      func;
-	char               data[APR_MAX_BUF];
-	wait_queue_head_t  wait;
-	void               *priv;
-	uint32_t           smd_state;
-	wait_queue_head_t  dest;
-	uint32_t           dest_state;
-};
-#endif
 
 #endif
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 1c13cd2..0e4586f 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -2216,25 +2216,6 @@
 #if defined(CONFIG_THERMAL_QPNP_ADC_TM)				\
 			|| defined(CONFIG_THERMAL_QPNP_ADC_TM_MODULE)
 /**
- * qpnp_adc_tm_usbid_configure() - Configures Channel 0 of VADC_BTM to
- *		monitor USB_ID channel using 100k internal pull-up.
- *		USB driver passes the high/low voltage threshold along
- *		with the notification callback once the set thresholds
- *		are crossed.
- * @param:	Structure pointer of qpnp_adc_tm_usbid_param type.
- *		Clients pass the low/high voltage along with the threshold
- *		notification callback.
- */
-int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
-					struct qpnp_adc_tm_btm_param *param);
-/**
- * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 thats
- *		assigned for monitoring USB_ID. Disables the low/high
- *		threshold activation for channel 0 as well.
- * @param:	none.
- */
-int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
-/**
  * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to
  *		monitor the corresponding ADC channel for threshold detection.
  *		Driver passes the high/low voltage threshold along
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52524a8..decb943 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -179,6 +179,7 @@
 
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
 extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern unsigned int sched_get_cpu_util(int cpu);
 
 extern void calc_global_load(unsigned long ticks);
 
@@ -1483,7 +1484,9 @@
 	u32 sum_history[RAVG_HIST_SIZE_MAX];
 	u32 *curr_window_cpu, *prev_window_cpu;
 	u32 curr_window, prev_window;
+#ifdef CONFIG_SCHED_HMP
 	u64 curr_burst, avg_burst, avg_sleep_time;
+#endif
 	u16 active_windows;
 	u32 pred_demand;
 	u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -1659,7 +1662,8 @@
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_HMP
+	u64 last_sleep_ts;
+#ifdef CONFIG_SCHED_WALT
 	struct ravg ravg;
 	/*
 	 * 'init_load_pct' represents the initial task load assigned to children
@@ -1672,6 +1676,7 @@
 	struct related_thread_group *grp;
 	struct list_head grp_list;
 	u64 cpu_cycles;
+	bool misfit;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -2635,8 +2640,6 @@
 extern unsigned long sched_get_busy(int cpu);
 extern void sched_get_cpus_busy(struct sched_load *busy,
 				const struct cpumask *query_cpus);
-extern void sched_set_io_is_busy(int val);
-extern int sched_set_boost(int enable);
 extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
 extern u32 sched_get_init_task_load(struct task_struct *p);
 extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
@@ -2652,25 +2655,12 @@
 			 int wakeup_energy, int wakeup_latency);
 extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
 				int wakeup_energy, int wakeup_latency);
-extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern u64 sched_ktime_clock(void);
 extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
 extern unsigned int sched_get_group_id(struct task_struct *p);
 
 #else /* CONFIG_SCHED_HMP */
 static inline void free_task_load_ptrs(struct task_struct *p) { }
 
-static inline u64 sched_ktime_clock(void)
-{
-	return 0;
-}
-
-static inline int
-register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
-	return 0;
-}
-
 static inline int sched_set_window(u64 window_start, unsigned int window_size)
 {
 	return -EINVAL;
@@ -2682,21 +2672,11 @@
 static inline void sched_get_cpus_busy(struct sched_load *busy,
 				       const struct cpumask *query_cpus) {};
 
-static inline void sched_set_io_is_busy(int val) {};
-
-static inline int sched_set_boost(int enable)
-{
-	return -EINVAL;
-}
-
 static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
 {
 	return 0;
 }
 
-static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
-					u32 fmin, u32 fmax) { }
-
 static inline void
 sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
 {
@@ -2708,6 +2688,37 @@
 }
 #endif /* CONFIG_SCHED_HMP */
 
+#ifdef CONFIG_SCHED_WALT
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
+					  u32 fmax);
+extern int sched_set_boost(int enable);
+#else
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_SCHED_WALT */
+
+#ifndef CONFIG_SCHED_WALT
+#ifndef CONFIG_SCHED_HMP
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					u32 fmin, u32 fmax) { }
+#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
+
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
@@ -2962,7 +2973,7 @@
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 extern void sched_exit(struct task_struct *p);
 #else
 static inline void sched_exit(struct task_struct *p) { }
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index ae9032a..190bf3b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -25,8 +25,14 @@
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_walt_init_task_load_pct;
-extern unsigned int sysctl_sched_walt_cpu_high_irqload;
+extern unsigned int sysctl_sched_init_task_load_pct;
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_cpu_high_irqload;
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int sysctl_sched_boost;
 #endif
 
 #ifdef CONFIG_SCHED_HMP
@@ -43,8 +49,6 @@
 extern unsigned int sysctl_sched_freq_reporting_policy;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_cpu_high_irqload;
-extern unsigned int sysctl_sched_init_task_load_pct;
 extern unsigned int sysctl_sched_spill_nr_run;
 extern unsigned int sysctl_sched_spill_load_pct;
 extern unsigned int sysctl_sched_upmigrate_pct;
@@ -52,12 +56,10 @@
 extern unsigned int sysctl_sched_group_upmigrate_pct;
 extern unsigned int sysctl_sched_group_downmigrate_pct;
 extern unsigned int sysctl_early_detection_duration;
-extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_small_wakee_task_load_pct;
 extern unsigned int sysctl_sched_big_waker_task_load_pct;
 extern unsigned int sysctl_sched_select_prev_cpu_us;
 extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sysctl_sched_new_task_windows;
 extern unsigned int sysctl_sched_pred_alert_freq;
 extern unsigned int sysctl_sched_freq_aggregate;
 extern unsigned int sysctl_sched_enable_thread_grouping;
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 60cc768..f3fa9e6 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -79,6 +79,7 @@
  * @current_state:   current client state
  * @crtc_id:		crtc_id associated with this rsc client.
  * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @id:		Index of the client, assigned during the client_create call
  * @list:	list to attach client master list
  */
 struct sde_rsc_client {
@@ -86,6 +87,7 @@
 	short current_state;
 	int crtc_id;
 	u32 rsc_index;
+	u32 id;
 	struct list_head list;
 };
 
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 262fa64..0583431 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -103,6 +103,7 @@
 int msm_vidc_g_fmt(void *instance, struct v4l2_format *f);
 int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a);
 int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
+int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
 int msm_vidc_release_buffer(void *instance, int buffer_type,
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index 34434fd..75e6ccd 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,8 @@
 int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
 			struct tcs_cmd *cmd, int *n);
 
+int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable);
+
 int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n);
 
 int rpmh_invalidate(struct rpmh_client *rc);
@@ -70,6 +72,9 @@
 			enum rpmh_state state, struct tcs_cmd *cmd, int *n)
 { return -ENODEV; }
 
+static inline int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
+{ return -ENODEV; }
+
 static inline int rpmh_write_control(struct rpmh_client *rc,
 			struct tcs_cmd *cmd, int n)
 { return -ENODEV; }
diff --git a/include/sound/voice_svc.h b/include/sound/voice_svc.h
deleted file mode 100644
index 035053f..0000000
--- a/include/sound/voice_svc.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __VOICE_SVC_H__
-#define __VOICE_SVC_H__
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define VOICE_SVC_DRIVER_NAME "voice_svc"
-
-#define VOICE_SVC_MVM_STR "MVM"
-#define VOICE_SVC_CVS_STR "CVS"
-#define MAX_APR_SERVICE_NAME_LEN  64
-
-#define MSG_REGISTER 0x1
-#define MSG_REQUEST  0x2
-#define MSG_RESPONSE 0x3
-
-struct voice_svc_write_msg {
-	__u32 msg_type;
-	__u8 payload[0];
-};
-
-struct voice_svc_register {
-	char svc_name[MAX_APR_SERVICE_NAME_LEN];
-	__u32 src_port;
-	__u8 reg_flag;
-};
-
-struct voice_svc_cmd_response {
-	__u32 src_port;
-	__u32 dest_port;
-	__u32 token;
-	__u32 opcode;
-	__u32 payload_size;
-	__u8 payload[0];
-};
-
-struct voice_svc_cmd_request {
-	char svc_name[MAX_APR_SERVICE_NAME_LEN];
-	__u32 src_port;
-	__u32 dest_port;
-	__u32 token;
-	__u32 opcode;
-	__u32 payload_size;
-	__u8 payload[0];
-};
-
-#endif
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 7586072..ad19e73 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -192,6 +192,42 @@
 	TP_ARGS(core, phase)
 );
 
+DECLARE_EVENT_CLASS(clk_state_dump,
+
+	TP_PROTO(const char *name, unsigned int prepare_count,
+	unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+	TP_ARGS(name, prepare_count, enable_count, rate, vdd_level),
+
+	TP_STRUCT__entry(
+		__string(name,			name)
+		__field(unsigned int,		prepare_count)
+		__field(unsigned int,		enable_count)
+		__field(unsigned long,		rate)
+		__field(unsigned int,		vdd_level)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->prepare_count = prepare_count;
+		__entry->enable_count = enable_count;
+		__entry->rate = rate;
+		__entry->vdd_level = vdd_level;
+	),
+
+	TP_printk("%s\tprepare:enable cnt [%u:%u]\trate: vdd_level [%lu:%u]",
+		__get_str(name), __entry->prepare_count, __entry->enable_count,
+		__entry->rate, __entry->vdd_level)
+);
+
+DEFINE_EVENT(clk_state_dump, clk_state,
+
+	TP_PROTO(const char *name, unsigned int prepare_count,
+	unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+	TP_ARGS(name, prepare_count, enable_count, rate, vdd_level)
+);
+
 #endif /* _TRACE_CLK_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/lmh.h b/include/trace/events/lmh.h
new file mode 100644
index 0000000..f6edacf
--- /dev/null
+++ b/include/trace/events/lmh.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lmh
+
+#if !defined(_TRACE_LMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LMH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lmh_dcvs_freq,
+	TP_PROTO(unsigned long cpu, unsigned long freq),
+
+	TP_ARGS(cpu, freq),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, cpu)
+		__field(unsigned long, freq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->freq = freq;
+	),
+
+	TP_printk("cpu:%lu max frequency:%lu", __entry->cpu, __entry->freq)
+);
+#endif /* _TRACE_LMH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index e792405..d55175e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -767,6 +767,33 @@
 		__entry->vote)
 );
 
+TRACE_EVENT(sugov_util_update,
+	    TP_PROTO(int cpu,
+		     unsigned long util, unsigned long max_cap,
+		     unsigned long nl, unsigned long pl,
+		     unsigned int flags),
+	    TP_ARGS(cpu, util, max_cap, nl, pl, flags),
+	    TP_STRUCT__entry(
+		    __field(	int,		cpu)
+		    __field(	unsigned long,	util)
+		    __field(	unsigned long,	max_cap)
+		    __field(	unsigned long,	nl)
+		    __field(	unsigned long,	pl)
+		    __field(	unsigned int,	flags)
+	    ),
+	    TP_fast_assign(
+		    __entry->cpu = cpu;
+		    __entry->util = util;
+		    __entry->max_cap = max_cap;
+		    __entry->nl = nl;
+		    __entry->pl = pl;
+		    __entry->flags = flags;
+	    ),
+	    TP_printk("cpu=%d util=%lu max_cap=%lu nl=%lu pl=%lu flags=%x",
+		      __entry->cpu, __entry->util, __entry->max_cap,
+		      __entry->nl, __entry->pl, __entry->flags)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0427805..4a9c625 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -108,169 +108,11 @@
 			)
 );
 
-#ifdef CONFIG_SCHED_HMP
-
+#ifdef CONFIG_SCHED_WALT
 struct group_cpu_time;
-struct migration_sum_data;
 extern const char *task_event_names[];
-extern const char *migrate_type_names[];
 
-TRACE_EVENT(sched_task_load,
-
-	TP_PROTO(struct task_struct *p, bool boost, int reason,
-		 bool sync, bool need_idle, u32 flags, int best_cpu),
-
-	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(unsigned int,	demand			)
-		__field(	bool,	boost			)
-		__field(	int,	reason			)
-		__field(	bool,	sync			)
-		__field(	bool,	need_idle		)
-		__field(	u32,	flags			)
-		__field(	int,	best_cpu		)
-		__field(	u64,	latency			)
-		__field(	int,	grp_id			)
-		__field(	u64,	avg_burst		)
-		__field(	u64,	avg_sleep		)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid		= p->pid;
-		__entry->demand		= p->ravg.demand;
-		__entry->boost		= boost;
-		__entry->reason		= reason;
-		__entry->sync		= sync;
-		__entry->need_idle	= need_idle;
-		__entry->flags		= flags;
-		__entry->best_cpu	= best_cpu;
-		__entry->latency	= p->state == TASK_WAKING ?
-						      sched_ktime_clock() -
-						      p->ravg.mark_start : 0;
-		__entry->grp_id		= p->grp ? p->grp->id : 0;
-		__entry->avg_burst	= p->ravg.avg_burst;
-		__entry->avg_sleep	= p->ravg.avg_sleep_time;
-	),
-
-	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
-		__entry->pid, __entry->comm, __entry->demand,
-		__entry->boost, __entry->reason, __entry->sync,
-		__entry->need_idle, __entry->flags, __entry->grp_id,
-		__entry->best_cpu, __entry->latency, __entry->avg_burst,
-		__entry->avg_sleep)
-);
-
-TRACE_EVENT(sched_set_preferred_cluster,
-
-	TP_PROTO(struct related_thread_group *grp, u64 total_demand),
-
-	TP_ARGS(grp, total_demand),
-
-	TP_STRUCT__entry(
-		__field(	int,	id			)
-		__field(	u64,	demand			)
-		__field(	int,	cluster_first_cpu	)
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(unsigned int,	task_demand			)
-	),
-
-	TP_fast_assign(
-		__entry->id			= grp->id;
-		__entry->demand			= total_demand;
-		__entry->cluster_first_cpu	= grp->preferred_cluster ?
-							cluster_first_cpu(grp->preferred_cluster)
-							: -1;
-	),
-
-	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
-			__entry->id, __entry->demand,
-			__entry->cluster_first_cpu)
-);
-
-DECLARE_EVENT_CLASS(sched_cpu_load,
-
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-
-	TP_ARGS(rq, idle, irqload, power_cost, temp),
-
-	TP_STRUCT__entry(
-		__field(unsigned int, cpu			)
-		__field(unsigned int, idle			)
-		__field(unsigned int, nr_running		)
-		__field(unsigned int, nr_big_tasks		)
-		__field(unsigned int, load_scale_factor		)
-		__field(unsigned int, capacity			)
-		__field(	 u64, cumulative_runnable_avg	)
-		__field(	 u64, irqload			)
-		__field(unsigned int, max_freq			)
-		__field(unsigned int, power_cost		)
-		__field(	 int, cstate			)
-		__field(	 int, dstate			)
-		__field(	 int, temp			)
-	),
-
-	TP_fast_assign(
-		__entry->cpu			= rq->cpu;
-		__entry->idle			= idle;
-		__entry->nr_running		= rq->nr_running;
-		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
-		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
-		__entry->capacity		= cpu_capacity(rq->cpu);
-		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
-		__entry->irqload		= irqload;
-		__entry->max_freq		= cpu_max_freq(rq->cpu);
-		__entry->power_cost		= power_cost;
-		__entry->cstate			= rq->cstate;
-		__entry->dstate			= rq->cluster->dstate;
-		__entry->temp			= temp;
-	),
-
-	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
-	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
-	__entry->load_scale_factor, __entry->capacity,
-	__entry->cumulative_runnable_avg, __entry->irqload,
-	__entry->max_freq, __entry->power_cost, __entry->cstate,
-	__entry->dstate, __entry->temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-TRACE_EVENT(sched_set_boost,
-
-	TP_PROTO(int type),
-
-	TP_ARGS(type),
-
-	TP_STRUCT__entry(
-		__field(int, type			)
-	),
-
-	TP_fast_assign(
-		__entry->type = type;
-	),
-
-	TP_printk("type %d", __entry->type)
-);
-
-#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP)
+#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT)
 static inline void __window_data(u32 *dst, u32 *src)
 {
 	if (src)
@@ -343,6 +185,117 @@
 }
 #endif
 
+TRACE_EVENT(sched_update_pred_demand,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+		 unsigned int pred_demand),
+
+	TP_ARGS(rq, p, runtime, pct, pred_demand),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(       pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	pct			)
+		__field(unsigned int,	pred_demand		)
+		__array(	  u8,	bucket, NUM_BUSY_BUCKETS)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->pct            = pct;
+		__entry->pred_demand     = pred_demand;
+		memcpy(__entry->bucket, p->ravg.busy_buckets,
+					NUM_BUSY_BUCKETS * sizeof(u8));
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->pct, __entry->cpu,
+		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
+		__entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
+		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
+		__entry->bucket[8], __entry->bucket[9])
+);
+
+TRACE_EVENT(sched_update_history,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+			enum task_event evt),
+
+	TP_ARGS(rq, p, runtime, samples, evt),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	samples			)
+		__field(enum task_event,	evt		)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	pred_demand		)
+		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
+		__field(unsigned int,	nr_big_tasks		)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->samples        = samples;
+		__entry->evt            = evt;
+		__entry->demand         = p->ravg.demand;
+		__entry->pred_demand     = p->ravg.pred_demand;
+		memcpy(__entry->hist, p->ravg.sum_history,
+					RAVG_HIST_SIZE_MAX * sizeof(u32));
+		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
+		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->samples,
+		task_event_names[__entry->evt],
+		__entry->demand, __entry->pred_demand,
+		__entry->hist[0], __entry->hist[1],
+		__entry->hist[2], __entry->hist[3],
+		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
+);
+
+TRACE_EVENT(sched_get_task_cpu_cycles,
+
+	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+
+	TP_ARGS(cpu, event, cycles, exec_time),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu		)
+		__field(int,		event		)
+		__field(u64,		cycles		)
+		__field(u64,		exec_time	)
+		__field(u32,		freq		)
+		__field(u32,		legacy_freq	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->event		= event;
+		__entry->cycles		= cycles;
+		__entry->exec_time	= exec_time;
+		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
+		__entry->legacy_freq	= cpu_cur_freq(cpu);
+	),
+
+	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
+		  __entry->cpu, __entry->event, __entry->cycles,
+		  __entry->exec_time, __entry->freq, __entry->legacy_freq)
+);
+
 TRACE_EVENT(sched_update_task_ravg,
 
 	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
@@ -434,148 +387,92 @@
 		__entry->curr_top, __entry->prev_top)
 );
 
-TRACE_EVENT(sched_get_task_cpu_cycles,
+TRACE_EVENT(sched_update_task_ravg_mini,
 
-	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
+		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+		 struct group_cpu_time *cpu_time),
 
-	TP_ARGS(cpu, event, cycles, exec_time),
-
-	TP_STRUCT__entry(
-		__field(int,		cpu		)
-		__field(int,		event		)
-		__field(u64,		cycles		)
-		__field(u64,		exec_time	)
-		__field(u32,		freq		)
-		__field(u32,		legacy_freq	)
-	),
-
-	TP_fast_assign(
-		__entry->cpu		= cpu;
-		__entry->event		= event;
-		__entry->cycles		= cycles;
-		__entry->exec_time	= exec_time;
-		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
-		__entry->legacy_freq	= cpu_cur_freq(cpu);
-	),
-
-	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
-		  __entry->cpu, __entry->event, __entry->cycles,
-		  __entry->exec_time, __entry->freq, __entry->legacy_freq)
-);
-
-TRACE_EVENT(sched_update_history,
-
-	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
-			enum task_event evt),
-
-	TP_ARGS(rq, p, runtime, samples, evt),
+	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,   TASK_COMM_LEN	)
 		__field(	pid_t,	pid			)
-		__field(unsigned int,	runtime			)
-		__field(	 int,	samples			)
+		__field(	u64,	wallclock		)
+		__field(	u64,	mark_start		)
+		__field(	u64,	delta_m			)
+		__field(	u64,	win_start		)
+		__field(	u64,	delta			)
 		__field(enum task_event,	evt		)
 		__field(unsigned int,	demand			)
-		__field(unsigned int,	pred_demand		)
-		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
-		__field(unsigned int,	nr_big_tasks		)
 		__field(	 int,	cpu			)
+		__field(	u64,	rq_cs			)
+		__field(	u64,	rq_ps			)
+		__field(	u64,	grp_cs			)
+		__field(	u64,	grp_ps			)
+		__field(	u32,	curr_window		)
+		__field(	u32,	prev_window		)
 	),
 
 	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid            = p->pid;
-		__entry->runtime        = runtime;
-		__entry->samples        = samples;
+		__entry->wallclock      = wallclock;
+		__entry->win_start      = rq->window_start;
+		__entry->delta          = (wallclock - rq->window_start);
 		__entry->evt            = evt;
-		__entry->demand         = p->ravg.demand;
-		__entry->pred_demand     = p->ravg.pred_demand;
-		memcpy(__entry->hist, p->ravg.sum_history,
-					RAVG_HIST_SIZE_MAX * sizeof(u32));
-		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
 		__entry->cpu            = rq->cpu;
-	),
-
-	TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
-		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
-		__entry->pid, __entry->comm,
-		__entry->runtime, __entry->samples,
-		task_event_names[__entry->evt],
-		__entry->demand, __entry->pred_demand,
-		__entry->hist[0], __entry->hist[1],
-		__entry->hist[2], __entry->hist[3],
-		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
-);
-
-TRACE_EVENT(sched_reset_all_window_stats,
-
-	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
-		int reason, unsigned int old_val, unsigned int new_val),
-
-	TP_ARGS(window_start, window_size, time_taken,
-		reason, old_val, new_val),
-
-	TP_STRUCT__entry(
-		__field(	u64,	window_start		)
-		__field(	u64,	window_size		)
-		__field(	u64,	time_taken		)
-		__field(	int,	reason			)
-		__field(unsigned int,	old_val			)
-		__field(unsigned int,	new_val			)
-	),
-
-	TP_fast_assign(
-		__entry->window_start = window_start;
-		__entry->window_size = window_size;
-		__entry->time_taken = time_taken;
-		__entry->reason	= reason;
-		__entry->old_val = old_val;
-		__entry->new_val = new_val;
-	),
-
-	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
-		  __entry->time_taken, __entry->window_start,
-		  __entry->window_size,
-		  sched_window_reset_reasons[__entry->reason],
-		  __entry->old_val, __entry->new_val)
-);
-
-TRACE_EVENT(sched_update_pred_demand,
-
-	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
-		 unsigned int pred_demand),
-
-	TP_ARGS(rq, p, runtime, pct, pred_demand),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,   TASK_COMM_LEN	)
-		__field(       pid_t,	pid			)
-		__field(unsigned int,	runtime			)
-		__field(	 int,	pct			)
-		__field(unsigned int,	pred_demand		)
-		__array(	  u8,	bucket, NUM_BUSY_BUCKETS)
-		__field(	 int,	cpu			)
-	),
-
-	TP_fast_assign(
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid            = p->pid;
-		__entry->runtime        = runtime;
-		__entry->pct            = pct;
-		__entry->pred_demand     = pred_demand;
-		memcpy(__entry->bucket, p->ravg.busy_buckets,
-					NUM_BUSY_BUCKETS * sizeof(u8));
-		__entry->cpu            = rq->cpu;
+		__entry->mark_start     = p->ravg.mark_start;
+		__entry->delta_m        = (wallclock - p->ravg.mark_start);
+		__entry->demand         = p->ravg.demand;
+		__entry->rq_cs          = rq->curr_runnable_sum;
+		__entry->rq_ps          = rq->prev_runnable_sum;
+		__entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
+		__entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
+		__entry->curr_window	= p->ravg.curr_window;
+		__entry->prev_window	= p->ravg.prev_window;
 	),
 
-	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
-		__entry->pid, __entry->comm,
-		__entry->runtime, __entry->pct, __entry->cpu,
-		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
-		__entry->bucket[2], __entry->bucket[3] ,__entry->bucket[4],
-		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
-		__entry->bucket[8], __entry->bucket[9])
+	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d task %d (%s) ms %llu delta %llu demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u grp_cs %lld grp_ps %lld",
+		__entry->wallclock, __entry->win_start, __entry->delta,
+		task_event_names[__entry->evt], __entry->cpu,
+		__entry->pid, __entry->comm, __entry->mark_start,
+		__entry->delta_m, __entry->demand,
+		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
+		__entry->prev_window,
+		__entry->grp_cs,
+		__entry->grp_ps)
+);
+
+struct migration_sum_data;
+extern const char *migrate_type_names[];
+
+TRACE_EVENT(sched_set_preferred_cluster,
+
+	TP_PROTO(struct related_thread_group *grp, u64 total_demand),
+
+	TP_ARGS(grp, total_demand),
+
+	TP_STRUCT__entry(
+		__field(	int,	id			)
+		__field(	u64,	demand			)
+		__field(	int,	cluster_first_cpu	)
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	task_demand			)
+	),
+
+	TP_fast_assign(
+		__entry->id			= grp->id;
+		__entry->demand			= total_demand;
+		__entry->cluster_first_cpu	= grp->preferred_cluster ?
+							cluster_first_cpu(grp->preferred_cluster)
+							: -1;
+	),
+
+	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
+			__entry->id, __entry->demand,
+			__entry->cluster_first_cpu)
 );
 
 TRACE_EVENT(sched_migration_update_sum,
@@ -626,6 +523,172 @@
 		__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
 );
 
+TRACE_EVENT(sched_set_boost,
+
+	TP_PROTO(int type),
+
+	TP_ARGS(type),
+
+	TP_STRUCT__entry(
+		__field(int, type			)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+	),
+
+	TP_printk("type %d", __entry->type)
+);
+
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+DECLARE_EVENT_CLASS(sched_cpu_load,
+
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+
+	TP_ARGS(rq, idle, irqload, power_cost, temp),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu			)
+		__field(unsigned int, idle			)
+		__field(unsigned int, nr_running		)
+		__field(unsigned int, nr_big_tasks		)
+		__field(unsigned int, load_scale_factor		)
+		__field(unsigned int, capacity			)
+		__field(	 u64, cumulative_runnable_avg	)
+		__field(	 u64, irqload			)
+		__field(unsigned int, max_freq			)
+		__field(unsigned int, power_cost		)
+		__field(	 int, cstate			)
+		__field(	 int, dstate			)
+		__field(	 int, temp			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= rq->cpu;
+		__entry->idle			= idle;
+		__entry->nr_running		= rq->nr_running;
+		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
+		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
+		__entry->capacity		= cpu_capacity(rq->cpu);
+		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+		__entry->irqload		= irqload;
+		__entry->max_freq		= cpu_max_freq(rq->cpu);
+		__entry->power_cost		= power_cost;
+		__entry->cstate			= rq->cstate;
+		__entry->dstate			= rq->cluster->dstate;
+		__entry->temp			= temp;
+	),
+
+	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
+	__entry->load_scale_factor, __entry->capacity,
+	__entry->cumulative_runnable_avg, __entry->irqload,
+	__entry->max_freq, __entry->power_cost, __entry->cstate,
+	__entry->dstate, __entry->temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+#endif
+
+#ifdef CONFIG_SCHED_HMP
+
+TRACE_EVENT(sched_task_load,
+
+	TP_PROTO(struct task_struct *p, bool boost, int reason,
+		 bool sync, bool need_idle, u32 flags, int best_cpu),
+
+	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	demand			)
+		__field(	bool,	boost			)
+		__field(	int,	reason			)
+		__field(	bool,	sync			)
+		__field(	bool,	need_idle		)
+		__field(	u32,	flags			)
+		__field(	int,	best_cpu		)
+		__field(	u64,	latency			)
+		__field(	int,	grp_id			)
+		__field(	u64,	avg_burst		)
+		__field(	u64,	avg_sleep		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->demand		= p->ravg.demand;
+		__entry->boost		= boost;
+		__entry->reason		= reason;
+		__entry->sync		= sync;
+		__entry->need_idle	= need_idle;
+		__entry->flags		= flags;
+		__entry->best_cpu	= best_cpu;
+		__entry->latency	= p->state == TASK_WAKING ?
+						      sched_ktime_clock() -
+						      p->ravg.mark_start : 0;
+		__entry->grp_id		= p->grp ? p->grp->id : 0;
+		__entry->avg_burst	= p->ravg.avg_burst;
+		__entry->avg_sleep	= p->ravg.avg_sleep_time;
+	),
+
+	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
+		__entry->pid, __entry->comm, __entry->demand,
+		__entry->boost, __entry->reason, __entry->sync,
+		__entry->need_idle, __entry->flags, __entry->grp_id,
+		__entry->best_cpu, __entry->latency, __entry->avg_burst,
+		__entry->avg_sleep)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+TRACE_EVENT(sched_reset_all_window_stats,
+
+	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
+		int reason, unsigned int old_val, unsigned int new_val),
+
+	TP_ARGS(window_start, window_size, time_taken,
+		reason, old_val, new_val),
+
+	TP_STRUCT__entry(
+		__field(	u64,	window_start		)
+		__field(	u64,	window_size		)
+		__field(	u64,	time_taken		)
+		__field(	int,	reason			)
+		__field(unsigned int,	old_val			)
+		__field(unsigned int,	new_val			)
+	),
+
+	TP_fast_assign(
+		__entry->window_start = window_start;
+		__entry->window_size = window_size;
+		__entry->time_taken = time_taken;
+		__entry->reason	= reason;
+		__entry->old_val = old_val;
+		__entry->new_val = new_val;
+	),
+
+	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
+		  __entry->time_taken, __entry->window_start,
+		  __entry->window_size,
+		  sched_window_reset_reasons[__entry->reason],
+		  __entry->old_val, __entry->new_val)
+);
+
 TRACE_EVENT(sched_get_busy,
 
 	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
@@ -690,6 +753,153 @@
 
 #endif	/* CONFIG_SCHED_HMP */
 
+#ifdef CONFIG_SMP
+TRACE_EVENT(sched_cpu_util,
+
+	TP_PROTO(struct task_struct *p, int cpu, int task_util, unsigned long curr_util, unsigned long new_cum_util, int sync),
+
+	TP_ARGS(p, cpu, task_util, curr_util, new_cum_util, sync),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN	)
+		__field(int, pid			)
+		__field(unsigned int, cpu			)
+		__field(int, task_util				)
+		__field(unsigned int, nr_running		)
+		__field(long, cpu_util			)
+		__field(long, cpu_util_cum			)
+		__field(long, new_cum_util			)
+		__field(unsigned int, capacity_curr		)
+		__field(unsigned int, capacity			)
+		__field(unsigned long, curr_util		)
+		__field(int, sync				)
+		__field(int, idle_state				)
+		__field(unsigned int, irqload		)
+		__field(int, high_irqload		)
+		__field(int, task_in_cum_demand		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid			= p->pid;
+		__entry->cpu			= cpu;
+		__entry->task_util		= task_util;
+		__entry->nr_running		= cpu_rq(cpu)->nr_running;
+		__entry->cpu_util		= cpu_util(cpu);
+		__entry->cpu_util_cum		= cpu_util_cum(cpu, 0);
+		__entry->new_cum_util		= new_cum_util;
+		__entry->task_in_cum_demand	= task_in_cum_window_demand(cpu_rq(cpu), p);
+		__entry->capacity_curr		= capacity_curr_of(cpu);
+		__entry->capacity		= capacity_of(cpu);
+		__entry->curr_util		= curr_util;
+		__entry->sync			= sync;
+		__entry->idle_state		= idle_get_state_idx(cpu_rq(cpu));
+		__entry->irqload		= sched_irqload(cpu);
+		__entry->high_irqload		= sched_cpu_high_irqload(cpu);
+	),
+
+	TP_printk("comm=%s pid=%d cpu=%d task_util=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld new_cum_util=%ld task_in_cum=%d capacity_curr=%u capacity=%u curr_util=%ld sync=%d idle_state=%d irqload=%u high_irqload=%u",
+		__entry->comm, __entry->pid, __entry->cpu, __entry->task_util, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->new_cum_util, __entry->task_in_cum_demand, __entry->capacity_curr, __entry->capacity, __entry->curr_util, __entry->sync, __entry->idle_state, __entry->irqload, __entry->high_irqload)
+);
+
+TRACE_EVENT(sched_energy_diff_packing,
+
+	TP_PROTO(struct task_struct *p, unsigned long task_util,
+		 int targeted_cpus, int nrg_pack, int nrg_spread),
+
+	TP_ARGS(p, task_util, targeted_cpus, nrg_pack, nrg_spread),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN	)
+		__field(int, pid			)
+		__field(unsigned long, task_util	)
+		__field(int, targeted_cpus		)
+		__field(int, nrg_pack		)
+		__field(int, nrg_spread		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid			= p->pid;
+		__entry->task_util		= task_util;
+		__entry->targeted_cpus		= targeted_cpus;
+		__entry->nrg_pack		= nrg_pack;
+		__entry->nrg_spread		= nrg_spread;
+	),
+
+	TP_printk("comm=%s pid=%d task_util=%lu targeted_cpus=%d nrg_pack=%d nrg_spread=%d nrg_diff=%d",
+		__entry->comm, __entry->pid, __entry->task_util,
+		__entry->targeted_cpus, __entry->nrg_pack,
+		__entry->nrg_spread, __entry->nrg_pack - __entry->nrg_spread)
+);
+
+DECLARE_EVENT_CLASS(sched_task_util,
+
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN	)
+		__field(int, pid			)
+		__field(int, task_cpu			)
+		__field(unsigned long, task_util	)
+		__field(unsigned long, cpu_util_freq	)
+		__field(int, nominated_cpu		)
+		__field(int, target_cpu			)
+		__field(int, ediff			)
+		__field(bool, need_idle			)
+		__field(u64, latency			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid			= p->pid;
+		__entry->task_cpu		= task_cpu;
+		__entry->task_util		= task_util;
+		__entry->cpu_util_freq		= cpu_util_freq(target_cpu, NULL);
+		__entry->nominated_cpu		= nominated_cpu;
+		__entry->target_cpu		= target_cpu;
+		__entry->ediff			= ediff;
+		__entry->need_idle		= need_idle;
+		__entry->latency		= sched_ktime_clock() - p->ravg.mark_start;
+	),
+
+	TP_printk("comm=%s pid=%d task_cpu=%d task_util=%lu nominated_cpu=%d target_cpu=%d energy_diff=%d need_idle=%d latency=%llu",
+		__entry->comm, __entry->pid, __entry->task_cpu, __entry->task_util, __entry->nominated_cpu, __entry->target_cpu, __entry->ediff, __entry->need_idle, __entry->latency)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_bias_to_waker,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_colocated,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_energy_diff,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_energy_aware,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_imbalance,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+#endif
+
 /*
  * Tracepoint for waking up a task:
  */
@@ -1539,6 +1749,43 @@
 		__entry->capb, __entry->capa, __entry->capd,
 		__entry->nrgn, __entry->nrgp)
 );
+
+TRACE_EVENT(sched_group_energy,
+
+	TP_PROTO(int cpu, long group_util, u64 total_nrg,
+		 int busy_nrg, int idle_nrg, int grp_idle_idx,
+		 int new_capacity),
+
+	TP_ARGS(cpu, group_util, total_nrg,
+		busy_nrg, idle_nrg, grp_idle_idx,
+		new_capacity),
+
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(long, group_util)
+		__field(u64, total_nrg)
+		__field(int, busy_nrg)
+		__field(int, idle_nrg)
+		__field(int, grp_idle_idx)
+		__field(int, new_capacity)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->group_util = group_util;
+		__entry->total_nrg = total_nrg;
+		__entry->busy_nrg = busy_nrg;
+		__entry->idle_nrg = idle_nrg;
+		__entry->grp_idle_idx = grp_idle_idx;
+		__entry->new_capacity = new_capacity;
+	),
+
+	TP_printk("cpu=%d group_util=%ld total_nrg=%llu busy_nrg=%d idle_nrg=%d grp_idle_idx=%d new_capacity=%d",
+		  __entry->cpu, __entry->group_util,
+		  __entry->total_nrg, __entry->busy_nrg, __entry->idle_nrg,
+		  __entry->grp_idle_idx, __entry->new_capacity)
+);
+
 /*
  * Tracepoint for schedtune_tasks_update
  */
diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h
index 143dacb..deb98c7 100644
--- a/include/uapi/asm-generic/ioctls.h
+++ b/include/uapi/asm-generic/ioctls.h
@@ -77,6 +77,9 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCPMGET	0x5441	/* PM get */
+#define TIOCPMPUT	0x5442	/* PM put */
+#define TIOCPMACT	0x5443	/* PM is active */
 
 #define FIONCLEX	0x5450
 #define FIOCLEX		0x5451
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 74034c6..44b42a6 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -356,4 +356,14 @@
 	struct drm_clip_rect roi[SDE_MAX_ROI_V1];
 };
 
+/**
+ * Define extended power modes supported by the SDE connectors.
+ */
+#define SDE_MODE_DPMS_ON	0
+#define SDE_MODE_DPMS_LP1	1
+#define SDE_MODE_DPMS_LP2	2
+#define SDE_MODE_DPMS_STANDBY	3
+#define SDE_MODE_DPMS_SUSPEND	4
+#define SDE_MODE_DPMS_OFF	5
+
 #endif /* _SDE_DRM_H_ */
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index f6598d1..316e838 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,7 +34,7 @@
 #define RTF_PREF(pref)	((pref) << 27)
 #define RTF_PREF_MASK	0x18000000
 
-#define RTF_PCPU	0x40000000
+#define RTF_PCPU	0x40000000	/* read-only: can not be set by user */
 #define RTF_LOCAL	0x80000000
 
 
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index c190446..f05155b 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -67,6 +67,8 @@
 #define KGSL_CONTEXT_TYPE_RS		4
 #define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
 
+#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
+
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
 /*
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a62870e..cf96ac1 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1061,6 +1061,8 @@
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 106)
 #define V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX \
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 107)
+#define V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 108)
 
 enum v4l2_mpeg_vidc_video_venc_iframesize_type {
 	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 5f375c4..478f7fe 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,3 +1,4 @@
+header-y += cam_cpas.h
 header-y += cam_defs.h
 header-y += cam_isp.h
 header-y += cam_isp_vfe.h
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
new file mode 100644
index 0000000..300bd87
--- /dev/null
+++ b/include/uapi/media/cam_cpas.h
@@ -0,0 +1,23 @@
+#ifndef __UAPI_CAM_CPAS_H__
+#define __UAPI_CAM_CPAS_H__
+
+#include "cam_defs.h"
+
+#define CAM_FAMILY_CAMERA_SS     1
+#define CAM_FAMILY_CPAS_SS       2
+
+/**
+ * struct cam_cpas_query_cap - CPAS query device capability payload
+ *
+ * @camera_family     : Camera family type
+ * @reserved          : Reserved field for alignment
+ * @camera_version    : Camera version
+ *
+ */
+struct cam_cpas_query_cap {
+	uint32_t                 camera_family;
+	uint32_t                 reserved;
+	struct cam_hw_version    camera_version;
+};
+
+#endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 68db5e1..b736755 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -190,9 +190,6 @@
 #define CAM_REQ_MGR_MAP_BUF                     (CAM_COMMON_OPCODE_MAX + 10)
 #define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
 #define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
-#define CAM_REQ_MGR_GET_MMU_HDLS_DEBUG          (CAM_COMMON_OPCODE_MAX + 13)
-#define CAM_REQ_MGR_GET_IO_BUF_DEBUG            (CAM_COMMON_OPCODE_MAX + 14)
-#define CAM_REQ_MGR_GET_KMD_BUF_DEBUG           (CAM_COMMON_OPCODE_MAX + 15)
 /* end of cam_req_mgr opcodes */
 
 #define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
@@ -206,6 +203,7 @@
 #define CAM_MEM_FLAG_STATS_BUF_TYPE             (1<<8)
 #define CAM_MEM_FLAG_PACKET_BUF_TYPE            (1<<9)
 #define CAM_MEM_FLAG_CACHE                      (1<<10)
+#define CAM_MEM_FLAG_HW_SHARED_ACCESS           (1<<11)
 
 #define CAM_MEM_MMU_MAX_HANDLE                  16
 
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index ac559f2..7161102 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -238,6 +238,9 @@
 	MSM_VIDC_EXTRADATA_FRAME_BITS_INFO = 0x00000010,
 	MSM_VIDC_EXTRADATA_VQZIP_SEI = 0x00000011,
 	MSM_VIDC_EXTRADATA_ROI_QP = 0x00000013,
+#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x00000014,
 #define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI \
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI = 0x00000015,
@@ -252,9 +255,6 @@
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP = 0x0700000F,
 	MSM_VIDC_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
-#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x070000011,
 	MSM_VIDC_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
 	MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
 	MSM_VIDC_EXTRADATA_INDEX = 0x7F100002,
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index b0350f0..27e9ef8 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -18,7 +18,6 @@
 header-y += audio_slimslave.h
 header-y += voice_params.h
 header-y += audio_effects.h
-header-y += voice_svc.h
 header-y += devdep_params.h
 header-y += msmcal-hwdep.h
 header-y += wcd-dsp-glink.h
diff --git a/init/Kconfig b/init/Kconfig
index 007186d..bdfcc0f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -795,19 +795,6 @@
 
 endchoice
 
-config RCU_EXPEDITE_BOOT
-	bool
-	default n
-	help
-	  This option enables expedited grace periods at boot time,
-	  as if rcu_expedite_gp() had been invoked early in boot.
-	  The corresponding rcu_unexpedite_gp() is invoked from
-	  rcu_end_inkernel_boot(), which is intended to be invoked
-	  at the end of the kernel-only boot sequence, just before
-	  init is exec'ed.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config BUILD_BIN2C
@@ -1274,6 +1261,7 @@
 
 config SCHED_HMP
 	bool "Scheduler support for heterogenous multi-processor systems"
+	select SCHED_WALT
 	depends on SMP && FAIR_GROUP_SCHED
 	help
 	  This feature will let the scheduler optimize task placement on
@@ -1281,6 +1269,13 @@
 	  in their instructions per-cycle capability or the maximum
 	  frequency they can attain.
 
+config SCHED_WALT
+	bool "WALT"
+	depends on SMP && FAIR_GROUP_SCHED
+	help
+	  Use Window-Assisted Load Tracking (WALT) as an alternative or
+	  additional load tracking scheme in lieu of or along with PELT.
+
 config SCHED_HMP_CSTATE_AWARE
 	bool "CPU C-state aware scheduler"
 	depends on SCHED_HMP
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85d1c94..7c9f94c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1829,14 +1829,15 @@
 
 	for (i = 0; i < MAX_BPF_REG; i++)
 		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
-			regs[i].range = dst_reg->off;
+			/* keep the maximum range already checked */
+			regs[i].range = max(regs[i].range, dst_reg->off);
 
 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
 		if (state->stack_slot_type[i] != STACK_SPILL)
 			continue;
 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
 		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-			reg->range = dst_reg->off;
+			reg->range = max(reg->range, dst_reg->off);
 	}
 }
 
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 30e0107..3c32c74 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -29,7 +29,6 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
-CONFIG_INET6_DIAG_DESTROY=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
@@ -72,7 +71,6 @@
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_TPROXY=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -173,5 +171,4 @@
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1d203e1..21a8764 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1488,14 +1488,12 @@
 	/* (Un)Install the callbacks for further cpu hotplug operations */
 	struct cpuhp_step *sp;
 
-	mutex_lock(&cpuhp_state_mutex);
 	sp = cpuhp_get_step(state);
 	sp->startup.single = startup;
 	sp->teardown.single = teardown;
 	sp->name = name;
 	sp->multi_instance = multi_instance;
 	INIT_HLIST_HEAD(&sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 }
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@@ -1565,16 +1563,13 @@
 {
 	enum cpuhp_state i;
 
-	mutex_lock(&cpuhp_state_mutex);
 	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
 		if (cpuhp_ap_states[i].name)
 			continue;
 
 		cpuhp_ap_states[i].name = "Reserved";
-		mutex_unlock(&cpuhp_state_mutex);
 		return i;
 	}
-	mutex_unlock(&cpuhp_state_mutex);
 	WARN(1, "No more dynamic states available for CPU hotplug\n");
 	return -ENOSPC;
 }
@@ -1591,6 +1586,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (!invoke || !sp->startup.multi)
 		goto add_node;
@@ -1615,11 +1611,10 @@
 	}
 add_node:
 	ret = 0;
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_add_head(node, &sp->list);
-	mutex_unlock(&cpuhp_state_mutex);
 
 err:
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 	return ret;
 }
@@ -1648,6 +1643,7 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	/* currently assignments for the ONLINE state are possible */
 	if (state == CPUHP_AP_ONLINE_DYN) {
@@ -1683,6 +1679,8 @@
 		}
 	}
 out:
+	mutex_unlock(&cpuhp_state_mutex);
+
 	put_online_cpus();
 	if (!ret && dyn_state)
 		return state;
@@ -1702,6 +1700,8 @@
 		return -EINVAL;
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
+
 	if (!invoke || !cpuhp_get_teardown_cb(state))
 		goto remove;
 	/*
@@ -1718,7 +1718,6 @@
 	}
 
 remove:
-	mutex_lock(&cpuhp_state_mutex);
 	hlist_del(node);
 	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
@@ -1743,6 +1742,7 @@
 	BUG_ON(cpuhp_cb_check(state));
 
 	get_online_cpus();
+	mutex_lock(&cpuhp_state_mutex);
 
 	if (sp->multi_instance) {
 		WARN(!hlist_empty(&sp->list),
@@ -1768,6 +1768,7 @@
 	}
 remove:
 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+	mutex_unlock(&cpuhp_state_mutex);
 	put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 104432f..dac3724 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -78,6 +78,9 @@
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
 		raw_spin_lock(&desc->lock);
 		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4f6db7e..9e03db9 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -132,8 +132,7 @@
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
-static atomic_t rcu_expedited_nesting =
-	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
+static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
 
 /*
  * Should normal grace-period primitives be expedited?  Intended for
@@ -182,8 +181,7 @@
  */
 void rcu_end_inkernel_boot(void)
 {
-	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
-		rcu_unexpedite_gp();
+	rcu_unexpedite_gp();
 	if (rcu_normal_after_boot)
 		WRITE_ONCE(rcu_normal, 1);
 }
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 90d10e8..27a7574 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -20,6 +20,7 @@
 obj-y += wait.o swait.o completion.o idle.o sched_avg.o
 obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 5bdd51b..f5e87791 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/jiffies.h>
 #include "sched.h"
 #include <linux/of.h>
 #include <linux/sched/core_ctl.h>
@@ -139,6 +140,7 @@
 	case RESTRAINED_BOOST:
 		freq_aggr_threshold_backup =
 			update_freq_aggregate_threshold(1);
+		mod_timer(&sched_grp_timer, jiffies + 1);
 		break;
 
 	default:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f7f5256..ccb2321 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -76,7 +76,6 @@
 #include <linux/frame.h>
 #include <linux/prefetch.h>
 #include <linux/irq.h>
-#include <linux/sched/core_ctl.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -87,6 +86,7 @@
 #endif
 
 #include "sched.h"
+#include "walt.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
 #include "../time/tick-internal.h"
@@ -801,6 +801,9 @@
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
+	if (flags & DEQUEUE_SLEEP)
+		clear_ed_task(p, rq);
+
 	dequeue_task(rq, p, flags);
 }
 
@@ -2193,6 +2196,9 @@
 		notif_required = true;
 	}
 
+	if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
+		inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));
+
 	note_task_waking(p, wallclock);
 #endif /* CONFIG_SMP */
 
@@ -2265,6 +2271,8 @@
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		if (!__task_in_cum_window_demand(rq, p))
+			inc_cum_window_demand(rq, p, task_load(p));
 		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
@@ -2352,8 +2360,9 @@
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+	p->last_sleep_ts		= 0;
+
 	INIT_LIST_HEAD(&p->se.group_node);
-	walt_init_new_task_load(p);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq			= NULL;
@@ -2718,8 +2727,6 @@
 	add_new_task_to_grp(p);
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-	walt_init_new_task_load(p);
-
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
 	/*
@@ -3255,15 +3262,9 @@
 	return ns;
 }
 
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+unsigned int capacity_margin_freq = 1280; /* ~20% margin */
 
-static inline
-unsigned long add_capacity_margin(unsigned long cpu_capacity)
-{
-	cpu_capacity  = cpu_capacity * capacity_margin;
-	cpu_capacity /= SCHED_CAPACITY_SCALE;
-	return cpu_capacity;
-}
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
 
 static inline
 unsigned long sum_capacity_reqs(unsigned long cfs_cap,
@@ -3287,7 +3288,7 @@
 	 * To make free room for a task that is building up its "real"
 	 * utilization and to harm its performance the least, request
 	 * a jump to a higher OPP as soon as the margin of free capacity
-	 * is impacted (specified by capacity_margin).
+	 * is impacted (specified by capacity_margin_freq).
 	 */
 	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
 }
@@ -3295,22 +3296,13 @@
 #ifdef CONFIG_SCHED_WALT
 static void sched_freq_tick_walt(int cpu)
 {
-	unsigned long cpu_utilization = cpu_util(cpu);
-	unsigned long capacity_curr = capacity_curr_of(cpu);
+	unsigned long cpu_utilization = cpu_util_freq(cpu, NULL);
 
 	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
 		return sched_freq_tick_pelt(cpu);
 
-	/*
-	 * Add a margin to the WALT utilization.
-	 * NOTE: WALT tracks a single CPU signal for all the scheduling
-	 * classes, thus this margin is going to be added to the DL class as
-	 * well, which is something we do not do in sched_freq_tick_pelt case.
-	 */
-	cpu_utilization = add_capacity_margin(cpu_utilization);
-	if (cpu_utilization <= capacity_curr)
-		return;
-
+	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE /
+			  capacity_orig_of(cpu);
 	/*
 	 * It is likely that the load is growing so we
 	 * keep the added margin in our request as an
@@ -3326,22 +3318,42 @@
 
 static void sched_freq_tick(int cpu)
 {
-	unsigned long capacity_orig, capacity_curr;
-
 	if (!sched_freq())
 		return;
 
-	capacity_orig = capacity_orig_of(cpu);
-	capacity_curr = capacity_curr_of(cpu);
-	if (capacity_curr == capacity_orig)
-		return;
-
 	_sched_freq_tick(cpu);
 }
 #else
 static inline void sched_freq_tick(int cpu) { }
 #endif /* CONFIG_CPU_FREQ_GOV_SCHED */
 
+#ifdef CONFIG_SCHED_WALT
+static atomic64_t walt_irq_work_lastq_ws;
+
+static inline u64 walt_window_start_of(struct rq *rq)
+{
+	return rq->window_start;
+}
+
+static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
+{
+	/* No HMP since that uses sched_get_cpus_busy */
+	if (rq->window_start != window_start &&
+		atomic_cmpxchg(&walt_irq_work_lastq_ws, window_start,
+			   rq->window_start) == window_start)
+		irq_work_queue(&rq->irq_work);
+}
+#else
+static inline u64 walt_window_start_of(struct rq *rq)
+{
+	return 0;
+}
+
+static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
+{
+}
+#endif
+
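run_walt_irq_work() above gates the load-reporting irq work with an atomic compare-and-exchange so that, for each window rollover, only the first CPU to observe the new window queues the work. A rough userspace sketch of the same gating pattern using C11 atomics, with made-up window values:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ullong last_reported_ws;	/* plays the role of walt_irq_work_lastq_ws */

static bool should_queue(unsigned long long old_ws, unsigned long long new_ws)
{
	unsigned long long expected = old_ws;

	/* window rolled over and this rollover has not been claimed yet */
	return new_ws != old_ws &&
	       atomic_compare_exchange_strong(&last_reported_ws,
					      &expected, new_ws);
}

int main(void)
{
	atomic_store(&last_reported_ws, 100);

	printf("%d\n", should_queue(100, 120));	/* 1: first observer queues the work */
	printf("%d\n", should_queue(100, 120));	/* 0: rollover already claimed */
	return 0;
}
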
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -3355,25 +3367,33 @@
 	bool early_notif;
 	u32 old_load;
 	struct related_thread_group *grp;
+	u64 window_start;
 
 	sched_clock_tick();
 
 	raw_spin_lock(&rq->lock);
 
+	/*
+	 * Record the current window_start. If update_task_ravg() below
+	 * rolls the window over, schedule a load-reporting irq-work.
+	 */
+	window_start = walt_window_start_of(rq);
+
 	old_load = task_load(curr);
 	set_window_start(rq);
 
-	update_rq_clock(rq);
-	curr->sched_class->task_tick(rq, curr, 0);
-	cpu_load_update_active(rq);
-	walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
-			walt_ktime_clock(), 0);
-	calc_global_load_tick(rq);
 
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 
+	update_rq_clock(rq);
+	curr->sched_class->task_tick(rq, curr, 0);
+	cpu_load_update_active(rq);
+	calc_global_load_tick(rq);
 	cpufreq_update_util(rq, 0);
+
+	run_walt_irq_work(window_start, rq);
+
 	early_notif = early_detection_notify(rq, wallclock);
 
 	raw_spin_unlock(&rq->lock);
@@ -3398,8 +3418,6 @@
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
 
-	if (cpu == tick_do_timer_cpu)
-		core_ctl_check(wallclock);
 	sched_freq_tick(cpu);
 }
 
@@ -3706,6 +3724,9 @@
 
 	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
+		if (!prev->on_rq)
+			prev->last_sleep_ts = wallclock;
+
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
 		cpufreq_update_util(rq, 0);
@@ -8163,7 +8184,6 @@
 {
 	cpumask_var_t non_isolated_cpus;
 
-	walt_init_cpu_efficiency();
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
@@ -8377,10 +8397,12 @@
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
 		rq->push_task = NULL;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 		cpumask_set_cpu(i, &rq->freq_domain_cpumask);
+		init_irq_work(&rq->irq_work, walt_irq_work);
 		rq->hmp_stats.cumulative_runnable_avg = 0;
 		rq->window_start = 0;
+		rq->cum_window_start = 0;
 		rq->hmp_stats.nr_big_tasks = 0;
 		rq->hmp_flags = 0;
 		rq->cur_irqload = 0;
@@ -8406,6 +8428,7 @@
 		rq->old_estimated_time = 0;
 		rq->old_busy_time_group = 0;
 		rq->hmp_stats.pred_demands_sum = 0;
+		rq->ed_task = NULL;
 		rq->curr_table = 0;
 		rq->prev_top = 0;
 		rq->curr_top = 0;
@@ -8422,6 +8445,7 @@
 
 			clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
 		}
+		rq->cum_window_demand = 0;
 #endif
 		INIT_LIST_HEAD(&rq->cfs_tasks);
 
@@ -9646,7 +9670,7 @@
  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 /*
  * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
  *
@@ -9674,6 +9698,7 @@
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	dequeue_task(rq, p, 0);
 	reset_task_stats(p);
+	dec_cum_window_demand(rq, p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
 	free_task_load_ptrs(p);
@@ -9682,4 +9707,4 @@
 	clear_ed_task(p, rq);
 	task_rq_unlock(rq, p, &rf);
 }
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1040a43..005d15e 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -45,7 +45,6 @@
 	spinlock_t pending_lock;
 	bool is_big_cluster;
 	int nrrun;
-	bool nrrun_changed;
 	struct task_struct *core_ctl_thread;
 	unsigned int first_cpu;
 	unsigned int boost;
@@ -433,33 +432,16 @@
 
 /* ==================== runqueue based core count =================== */
 
-#define RQ_AVG_TOLERANCE 2
-#define RQ_AVG_DEFAULT_MS 20
 #define NR_RUNNING_TOLERANCE 5
-static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
 
-static s64 rq_avg_timestamp_ms;
-
-static void update_running_avg(bool trigger_update)
+static void update_running_avg(void)
 {
-	int avg, iowait_avg, big_avg, old_nrrun;
-	s64 now;
-	unsigned long flags;
+	int avg, iowait_avg, big_avg;
 	struct cluster_data *cluster;
 	unsigned int index = 0;
 
-	spin_lock_irqsave(&state_lock, flags);
-
-	now = ktime_to_ms(ktime_get());
-	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
-		spin_unlock_irqrestore(&state_lock, flags);
-		return;
-	}
-	rq_avg_timestamp_ms = now;
 	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
 
-	spin_unlock_irqrestore(&state_lock, flags);
-
 	/*
 	 * Round up to the next integer if the average nr running tasks
 	 * is within NR_RUNNING_TOLERANCE/100 of the next integer.
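The rounding rule described in the comment above can be shown with a short sketch. It assumes, as elsewhere in this code, that sched_get_nr_running_avg() reports averages scaled by 100, so NR_RUNNING_TOLERANCE = 5 means "within 0.05 of the next integer"; the kernel's own arithmetic may differ in detail.

#include <stdio.h>

#define NR_RUNNING_TOLERANCE 5

/* avg is the run-queue average scaled by 100, e.g. 195 means 1.95 tasks */
static int rounded_nr_running(int avg)
{
	return (avg + NR_RUNNING_TOLERANCE) / 100;
}

int main(void)
{
	printf("%d\n", rounded_nr_running(195));	/* 2: within 0.05 of the next integer */
	printf("%d\n", rounded_nr_running(194));	/* 1: rounds down */
	return 0;
}
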
@@ -478,7 +460,6 @@
 	for_each_cluster(cluster, index) {
 		if (!cluster->inited)
 			continue;
-		old_nrrun = cluster->nrrun;
 		/*
 		 * Big cluster only need to take care of big tasks, but if
 		 * there are not enough big cores, big tasks need to be run
@@ -489,14 +470,7 @@
 		 * than scheduler, and can't predict scheduler's behavior.
 		 */
 		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
-		if (cluster->nrrun != old_nrrun) {
-			if (trigger_update)
-				apply_need(cluster);
-			else
-				cluster->nrrun_changed = true;
-		}
 	}
-	return;
 }
 
 /* adjust needed CPUs based on current runqueue information */
@@ -605,24 +579,15 @@
 		wake_up_core_ctl_thread(cluster);
 }
 
-static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
+static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
 {
-	struct cpu_data *c = &per_cpu(cpu_state, cpu);
-	struct cluster_data *cluster = c->cluster;
 	unsigned int old_is_busy = c->is_busy;
 
-	if (!cluster || !cluster->inited)
-		return 0;
+	if (c->busy == busy)
+		return;
 
-	update_running_avg(false);
-	if (c->busy == busy && !cluster->nrrun_changed)
-		return 0;
 	c->busy = busy;
-	cluster->nrrun_changed = false;
-
-	apply_need(cluster);
-	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
-	return 0;
+	trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
 }
 
 /* ========================= core count enforcement ==================== */
@@ -639,21 +604,6 @@
 }
 
 static u64 core_ctl_check_timestamp;
-static u64 core_ctl_check_interval;
-
-static bool do_check(u64 wallclock)
-{
-	bool do_check = false;
-	unsigned long flags;
-
-	spin_lock_irqsave(&state_lock, flags);
-	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
-		core_ctl_check_timestamp = wallclock;
-		do_check = true;
-	}
-	spin_unlock_irqrestore(&state_lock, flags);
-	return do_check;
-}
 
 int core_ctl_set_boost(bool boost)
 {
@@ -695,21 +645,39 @@
 }
 EXPORT_SYMBOL(core_ctl_set_boost);
 
-void core_ctl_check(u64 wallclock)
+void core_ctl_check(u64 window_start)
 {
+	int cpu;
+	unsigned int busy;
+	struct cpu_data *c;
+	struct cluster_data *cluster;
+	unsigned int index = 0;
+
 	if (unlikely(!initialized))
 		return;
 
-	if (do_check(wallclock)) {
-		unsigned int index = 0;
-		struct cluster_data *cluster;
+	if (window_start == core_ctl_check_timestamp)
+		return;
 
-		update_running_avg(true);
+	core_ctl_check_timestamp = window_start;
 
-		for_each_cluster(cluster, index) {
-			if (eval_need(cluster))
-				wake_up_core_ctl_thread(cluster);
-		}
+	for_each_possible_cpu(cpu) {
+
+		c = &per_cpu(cpu_state, cpu);
+		cluster = c->cluster;
+
+		if (!cluster || !cluster->inited)
+			continue;
+
+		busy = sched_get_cpu_util(cpu);
+		core_ctl_set_busy(c, busy);
+	}
+
+	update_running_avg();
+
+	for_each_cluster(cluster, index) {
+		if (eval_need(cluster))
+			wake_up_core_ctl_thread(cluster);
 	}
 }
 
@@ -1079,74 +1047,25 @@
 	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
 }
 
-static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
-				void *data)
-{
-	struct cpufreq_policy *policy = data;
-	int ret;
-
-	switch (val) {
-	case CPUFREQ_CREATE_POLICY:
-		ret = cluster_init(policy->related_cpus);
-		if (ret)
-			pr_warn("unable to create core ctl group: %d\n", ret);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpufreq_pol_nb = {
-	.notifier_call = cpufreq_policy_cb,
-};
-
-static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
-				void *data)
-{
-	struct cpufreq_govinfo *info = data;
-
-	switch (val) {
-	case CPUFREQ_LOAD_CHANGE:
-		core_ctl_set_busy(info->cpu, info->load);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpufreq_gov_nb = {
-	.notifier_call = cpufreq_gov_cb,
-};
-
 static int __init core_ctl_init(void)
 {
 	unsigned int cpu;
+	struct cpumask cpus = *cpu_possible_mask;
 
 	if (should_skip(cpu_possible_mask))
 		return 0;
 
-	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
-					* NSEC_PER_MSEC;
-
 	register_cpu_notifier(&cpu_notifier);
-	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
-	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);
 
-	cpu_maps_update_begin();
-	for_each_online_cpu(cpu) {
-		struct cpufreq_policy *policy;
+	for_each_cpu(cpu, &cpus) {
 		int ret;
+		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);
 
-		policy = cpufreq_cpu_get(cpu);
-		if (policy) {
-			ret = cluster_init(policy->related_cpus);
-			if (ret)
-				pr_warn("unable to create core ctl group: %d\n"
-					, ret);
-			cpufreq_cpu_put(policy);
-		}
+		ret = cluster_init(cluster_cpus);
+		if (ret)
+			pr_warn("unable to create core ctl group: %d\n", ret);
+		cpumask_andnot(&cpus, &cpus, cluster_cpus);
 	}
-	cpu_maps_update_done();
 	initialized = true;
 	return 0;
 }
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index 1d471d5..11b75e3 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -234,8 +234,12 @@
 
 	scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
 
-	new_capacity = scr->cfs + scr->rt;
-	new_capacity = new_capacity * capacity_margin
+#ifdef CONFIG_SCHED_WALT
+	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+		new_capacity = scr->cfs + scr->rt;
+#endif
+	new_capacity = scr->cfs;
+	new_capacity = new_capacity * capacity_margin_freq
 		/ SCHED_CAPACITY_SCALE;
 	new_capacity += scr->dl;
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 27d96e2..42630ec 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -23,6 +23,7 @@
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
+	unsigned int hispeed_freq;
 };
 
 struct sugov_policy {
@@ -34,8 +35,11 @@
 	raw_spinlock_t update_lock;  /* For shared policies */
 	u64 last_freq_update_time;
 	s64 freq_update_delay_ns;
+	u64 hispeed_jmp_ts;
 	unsigned int next_freq;
 	unsigned int cached_raw_freq;
+	unsigned long hispeed_util;
+	unsigned long max;
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
@@ -56,10 +60,13 @@
 	unsigned long iowait_boost_max;
 	u64 last_update;
 
+	struct sched_walt_cpu_load walt_load;
+
 	/* The fields below are only needed when sharing a policy. */
 	unsigned long util;
 	unsigned long max;
 	unsigned int flags;
+	unsigned int cpu;
 };
 
 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -70,9 +77,6 @@
 {
 	s64 delta_ns;
 
-	if (sg_policy->work_in_progress)
-		return false;
-
 	if (unlikely(sg_policy->need_freq_update)) {
 		sg_policy->need_freq_update = false;
 		/*
@@ -113,6 +117,7 @@
 	}
 }
 
+#define TARGET_LOAD 80
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
  * @sg_policy: schedutil policy object to compute the new frequency for.
@@ -150,15 +155,18 @@
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long cfs_max;
+	struct sugov_cpu *loadcpu = &per_cpu(sugov_cpu, cpu);
 
-	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
 
 	*util = min(rq->cfs.avg.util_avg, cfs_max);
 	*max = cfs_max;
+
+	*util = cpu_util_freq(cpu, &loadcpu->walt_load);
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
@@ -191,6 +199,34 @@
 	sg_cpu->iowait_boost >>= 1;
 }
 
+#define NL_RATIO 75
+#define HISPEED_LOAD 90
+static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
+			      unsigned long *max)
+{
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu);
+	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
+	unsigned long nl = sg_cpu->walt_load.nl;
+	unsigned long cpu_util = sg_cpu->util;
+	bool is_hiload;
+
+	is_hiload = (cpu_util >= mult_frac(cap_cur,
+					   HISPEED_LOAD,
+					   100));
+
+	if (is_hiload && !is_migration &&
+	    sg_policy->next_freq < sg_policy->tunables->hispeed_freq) {
+		*util = max(*util, sg_policy->hispeed_util);
+		sg_policy->hispeed_jmp_ts = sg_cpu->last_update;
+	}
+
+	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
+		*util = *max;
+
+	*util = max(*util, sg_cpu->walt_load.pl);
+}
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
 				unsigned int flags)
 {
@@ -206,11 +242,14 @@
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
+	flags &= ~SCHED_CPUFREQ_RT_DL;
+
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(&util, &max, sg_cpu->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
+		sugov_walt_adjust(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 	}
 	sugov_update_commit(sg_policy, time, next_f);
@@ -230,13 +269,14 @@
 		return max_f;
 
 	sugov_iowait_boost(sg_cpu, &util, &max);
+	sugov_walt_adjust(sg_cpu, &util, &max);
 
 	for_each_cpu(j, policy->cpus) {
 		struct sugov_cpu *j_sg_cpu;
 		unsigned long j_util, j_max;
 		s64 delta_ns;
 
-		if (j == smp_processor_id())
+		if (j == sg_cpu->cpu)
 			continue;
 
 		j_sg_cpu = &per_cpu(sugov_cpu, j);
@@ -248,7 +288,7 @@
 		 * idle now (and clear iowait_boost for it).
 		 */
 		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
-		if (delta_ns > TICK_NSEC) {
+		if (delta_ns > sched_ravg_window) {
 			j_sg_cpu->iowait_boost = 0;
 			continue;
 		}
@@ -263,6 +303,7 @@
 		}
 
 		sugov_iowait_boost(j_sg_cpu, &util, &max);
+		sugov_walt_adjust(j_sg_cpu, &util, &max);
 	}
 
 	return get_next_freq(sg_policy, util, max);
@@ -273,13 +314,24 @@
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
+	unsigned long util, max, hs_util;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
+	sugov_get_util(&util, &max, sg_cpu->cpu);
+
+	flags &= ~SCHED_CPUFREQ_RT_DL;
 
 	raw_spin_lock(&sg_policy->update_lock);
 
+	if (sg_policy->max != max) {
+		hs_util = mult_frac(max,
+				    sg_policy->tunables->hispeed_freq,
+				    sg_policy->policy->cpuinfo.max_freq);
+		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+		sg_policy->hispeed_util = hs_util;
+		sg_policy->max = max;
+	}
+
 	sg_cpu->util = util;
 	sg_cpu->max = max;
 	sg_cpu->flags = flags;
@@ -287,6 +339,10 @@
 	sugov_set_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
+	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max,
+				sg_cpu->walt_load.nl,
+				sg_cpu->walt_load.pl, flags);
+
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
 		sugov_update_commit(sg_policy, time, next_f);
@@ -364,10 +420,42 @@
 	return count;
 }
 
+static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
+					const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+	unsigned int val;
+	struct sugov_policy *sg_policy;
+	unsigned long hs_util;
+
+	if (kstrtouint(buf, 10, &val))
+		return -EINVAL;
+
+	tunables->hispeed_freq = val;
+	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+		hs_util = mult_frac(sg_policy->max,
+				    sg_policy->tunables->hispeed_freq,
+				    sg_policy->policy->cpuinfo.max_freq);
+		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+		sg_policy->hispeed_util = hs_util;
+	}
+
+	return count;
+}
+
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
+	&hispeed_freq.attr,
 	NULL
 };
 
@@ -512,6 +600,7 @@
 	}
 
 	tunables->rate_limit_us = LATENCY_MULTIPLIER;
+	tunables->hispeed_freq = 0;
 	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 	if (lat)
 		tunables->rate_limit_us *= lat;
@@ -585,6 +674,7 @@
 
 		memset(sg_cpu, 0, sizeof(*sg_cpu));
 		sg_cpu->sg_policy = sg_policy;
+		sg_cpu->cpu = cpu;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
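The hispeed_freq tunable introduced in this file is turned into a utilization threshold (hispeed_util) by scaling the CPU's maximum capacity to the hispeed frequency and then applying TARGET_LOAD. A worked sketch with assumed example values (capacity 1024, cpuinfo.max_freq 1.8 GHz, hispeed_freq 1.2 GHz, TARGET_LOAD 80); the real code uses the kernel's mult_frac():

#include <stdio.h>

/* same split-multiply shape as the kernel's mult_frac(): x * numer / denom */
static unsigned long mult_frac_ul(unsigned long x, unsigned long numer,
				  unsigned long denom)
{
	return (x / denom) * numer + ((x % denom) * numer) / denom;
}

int main(void)
{
	unsigned long max = 1024;			/* capacity scale */
	unsigned long cpuinfo_max_freq = 1800000;	/* kHz, assumed */
	unsigned long hispeed_freq = 1200000;		/* kHz tunable, assumed */
	unsigned long hs_util;

	hs_util = mult_frac_ul(max, hispeed_freq, cpuinfo_max_freq);	/* 682 */
	hs_util = mult_frac_ul(hs_util, 80, 100);			/* TARGET_LOAD */
	printf("hispeed_util = %lu\n", hs_util);	/* ~545 of 1024 */
	return 0;
}
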
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0085f66..10a807c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -15,10 +15,11 @@
  *                    Fabio Checconi <fchecconi@gmail.com>
  */
 #include "sched.h"
+#include "walt.h"
 
 #include <linux/slab.h>
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 
 static void
 inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
@@ -43,7 +44,7 @@
 				      pred_demand_delta);
 }
 
-#else	/* CONFIG_SCHED_HMP */
+#else	/* CONFIG_SCHED_WALT */
 
 static inline void
 inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
@@ -51,7 +52,7 @@
 static inline void
 dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
 
-#endif	/* CONFIG_SCHED_HMP */
+#endif	/* CONFIG_SCHED_WALT */
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1843,7 +1844,7 @@
 	.switched_to		= switched_to_dl,
 
 	.update_curr		= update_curr_dl,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
 #endif
 };
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index ae8bd29..ed9f6db 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -19,6 +19,7 @@
 #include <linux/debugfs.h>
 
 #include "sched.h"
+#include "walt.h"
 
 static DEFINE_SPINLOCK(sched_debug_lock);
 
@@ -599,7 +600,7 @@
 			cfs_rq->throttle_count);
 	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
 			cfs_rq->runtime_enabled);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
 			cfs_rq->hmp_stats.nr_big_tasks);
 	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
@@ -696,9 +697,11 @@
 #ifdef CONFIG_SMP
 	P(cpu_capacity);
 #endif
+#ifdef CONFIG_SCHED_WALT
 #ifdef CONFIG_SCHED_HMP
 	P(static_cpu_pwr_cost);
 	P(cluster->static_cluster_pwr_cost);
+#endif
 	P(cluster->load_scale_factor);
 	P(cluster->capacity);
 	P(cluster->max_possible_capacity);
@@ -706,7 +709,9 @@
 	P(cluster->cur_freq);
 	P(cluster->max_freq);
 	P(cluster->exec_scale_factor);
+#ifdef CONFIG_SCHED_WALT
 	P(hmp_stats.nr_big_tasks);
+#endif
 	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
 			rq->hmp_stats.cumulative_runnable_avg);
 #endif
@@ -788,9 +793,11 @@
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_WALT
 #ifdef CONFIG_SCHED_HMP
 	P(sched_upmigrate);
 	P(sched_downmigrate);
+#endif
 	P(sched_init_task_load_windows);
 	P(min_capacity);
 	P(max_capacity);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
old mode 100644
new mode 100755
index 6fb615e..82e6490
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -34,6 +34,7 @@
 
 #include "sched.h"
 #include "tune.h"
+#include "walt.h"
 #include <trace/events/sched.h>
 
 /* QHMP/Zone forward declarations */
@@ -42,8 +43,12 @@
 struct sd_lb_stats;
 struct sg_lb_stats;
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand);
+#endif
 
+#ifdef CONFIG_SCHED_HMP
 #ifdef CONFIG_CFS_BANDWIDTH
 static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
 				 struct task_struct *p, int change_cra);
@@ -67,8 +72,6 @@
 				 struct task_struct *p, int change_cra) { }
 #endif /* CONFIG_CFS_BANDWIDTH */
 
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-				       u32 new_task_load, u32 new_pred_demand);
 #ifdef CONFIG_SMP
 
 static struct rq *find_busiest_queue_hmp(struct lb_env *env,
@@ -122,6 +125,10 @@
 
 #endif /* CONFIG_SCHED_HMP */
 
+#ifdef CONFIG_SCHED_WALT
+static inline bool task_fits_max(struct task_struct *p, int cpu);
+#endif
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -137,7 +144,7 @@
 unsigned int sysctl_sched_latency = 6000000ULL;
 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
-unsigned int sysctl_sched_is_big_little = 0;
+unsigned int sysctl_sched_is_big_little = 1;
 unsigned int sysctl_sched_sync_hint_enable = 1;
 unsigned int sysctl_sched_initial_task_util = 0;
 unsigned int sysctl_sched_cstate_aware = 1;
@@ -145,8 +152,6 @@
 #ifdef CONFIG_SCHED_WALT
 unsigned int sysctl_sched_use_walt_cpu_util = 1;
 unsigned int sysctl_sched_use_walt_task_util = 1;
-__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
-    (10 * NSEC_PER_MSEC);
 #endif
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -216,7 +221,8 @@
  * The margin used when comparing utilization with CPU capacity:
  * util * 1024 < capacity * margin
  */
-unsigned int capacity_margin = 1280; /* ~20% */
+unsigned int capacity_margin = 1078; /* ~5% margin */
+unsigned int capacity_margin_down = 1205; /* ~15% margin */
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -4677,7 +4683,6 @@
 #endif
 
 #ifdef CONFIG_SMP
-static bool cpu_overutilized(int cpu);
 static unsigned long capacity_orig_of(int cpu);
 static unsigned long cpu_util(int cpu);
 static inline unsigned long boosted_cpu_util(int cpu);
@@ -4757,6 +4762,9 @@
 
 	if (!se) {
 		add_nr_running(rq, 1);
+#ifdef CONFIG_SCHED_WALT
+		p->misfit = !task_fits_max(p, rq->cpu);
+#endif
 		inc_rq_hmp_stats(rq, p, 1);
 	}
 
@@ -4850,20 +4858,10 @@
 #ifdef CONFIG_SMP
 
 	if (!se) {
-		/*
-		 * We want to potentially trigger a freq switch
-		 * request only for tasks that are going to sleep;
-		 * this is because we get here also during load
-		 * balancing, but in these cases it seems wise to
-		 * trigger as single request after load balancing is
-		 * done.
-		 */
-		if (task_sleep) {
-			if (rq->cfs.nr_running)
-				update_capacity_of(cpu_of(rq));
-			else if (sched_freq())
-				set_cfs_cpu_capacity(cpu_of(rq), false, 0);
-		}
+		if (rq->cfs.nr_running)
+			update_capacity_of(cpu_of(rq));
+		else if (sched_freq())
+			set_cfs_cpu_capacity(cpu_of(rq), false, 0);
 	}
 
 	/* Update SchedTune accouting */
@@ -5338,11 +5336,6 @@
 	       >> SCHED_CAPACITY_SHIFT;
 }
 
-static inline bool energy_aware(void)
-{
-	return sched_feat(ENERGY_AWARE);
-}
-
 /*
  * Externally visible function. Let's keep the one above
  * so that the check is inlined/optimized in the sched paths.
@@ -5361,6 +5354,8 @@
 	int			dst_cpu;
 	int			energy;
 	int			payoff;
+	int			sync_cpu;
+	unsigned long		curr_util;
 	struct task_struct	*task;
 	struct {
 		int before;
@@ -5391,18 +5386,89 @@
  */
 static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
 {
-	int util = __cpu_util(cpu, delta);
+	int util = cpu_util_cum(cpu, delta);
 
 	if (util >= capacity)
 		return SCHED_CAPACITY_SCALE;
 
-	return (util << SCHED_CAPACITY_SHIFT)/capacity;
+	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
+}
+
+static inline int task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+		u64 demand = p->ravg.demand;
+
+		return (demand << 10) / sched_ravg_window;
+	}
+#endif
+	return p->se.avg.util_avg;
+}
+
+#define SCHED_ENABLE_WAKER_WAKEE	0
+
+static unsigned int sched_small_wakee_task_util = 102; /* ~10% of max cap */
+static unsigned int sched_big_waker_task_util = 256;  /* 25% of max cap */
+
+static inline bool
+wake_on_waker_sibling(struct task_struct *p)
+{
+	return SCHED_ENABLE_WAKER_WAKEE &&
+	       task_util(current) > sched_big_waker_task_util &&
+	       task_util(p) < sched_small_wakee_task_util;
+}
+
+#define sysctl_sched_prefer_sync_wakee_to_waker 0
+
+static inline bool
+bias_to_waker_cpu(struct task_struct *p, int cpu)
+{
+	return sysctl_sched_prefer_sync_wakee_to_waker &&
+	       cpu_rq(cpu)->nr_running == 1 &&
+	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
+	       cpu_active(cpu) && !cpu_isolated(cpu);
 }
 
 static int calc_util_delta(struct energy_env *eenv, int cpu)
 {
+#ifdef CONFIG_SCHED_WALT
+	if (cpu == eenv->src_cpu) {
+		if (!walt_disabled && sysctl_sched_use_walt_task_util &&
+		    !task_in_cum_window_demand(cpu_rq(cpu), eenv->task)) {
+			if (eenv->util_delta == 0)
+				/*
+				 * Energy before: calculate the energy cost
+				 * with the new task placed on src_cpu.  The
+				 * task is not on a runqueue, so its util is
+				 * not in WALT's cr_avg (it was discounted
+				 * when it last slept).  Hence return the
+				 * task's util as the delta, so src_cpu's
+				 * energy is computed as if the task were
+				 * running on it.
+				 */
+				return task_util(eenv->task);
+			/*
+			 * Energy after: WALT's cr_avg does not yet account
+			 * for the new task's util.  Thus return a 0 delta so
+			 * the energy cost of src_cpu is calculated without
+			 * the task's util.
+			 */
+			return 0;
+		}
+		/*
+		 * The task is already on a runqueue, e.g. during load
+		 * balancing.  WALT's cpu util already accounts for the
+		 * task's util.  Return a 0 delta for "energy before" so the
+		 * calculation includes the task's util, and -task_util for
+		 * "energy after" so the calculation is done with the task's
+		 * util discounted.
+		 */
+		return -eenv->util_delta;
+	}
+#else
 	if (cpu == eenv->src_cpu)
 		return -eenv->util_delta;
+#endif
 	if (cpu == eenv->dst_cpu)
 		return eenv->util_delta;
 	return 0;
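With WALT enabled, task_util() in the hunk above converts the task's window demand (nanoseconds of execution within the window) into the 0..1024 capacity scale via (demand << 10) / sched_ravg_window. A worked sketch with assumed numbers, taking a 20 ms window:

#include <stdio.h>

int main(void)
{
	unsigned long long demand = 5000000ULL;			/* 5 ms of demand in the window */
	unsigned long long sched_ravg_window = 20000000ULL;	/* 20 ms window, assumed */
	unsigned long long util = (demand << 10) / sched_ravg_window;

	printf("task_util = %llu\n", util);	/* 256, i.e. a quarter of one CPU */
	return 0;
}
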
@@ -5416,7 +5482,10 @@
 
 	for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
 		delta = calc_util_delta(eenv, i);
-		max_util = max(max_util, __cpu_util(i, delta));
+		/* subtract sync_cpu's rq->curr util to discount its cost */
+		if (eenv->sync_cpu == i)
+			delta -= eenv->curr_util;
+		max_util = max(max_util, cpu_util_cum(i, delta));
 	}
 
 	return max_util;
@@ -5440,6 +5509,9 @@
 
 	for_each_cpu(i, sched_group_cpus(sg)) {
 		delta = calc_util_delta(eenv, i);
+		/* subtract sync_cpu's rq->curr util to discount its cost */
+		if (eenv->sync_cpu == i)
+			delta -= eenv->curr_util;
 		util_sum += __cpu_norm_util(i, capacity, delta);
 	}
 
@@ -5448,17 +5520,26 @@
 	return util_sum;
 }
 
-static int find_new_capacity(struct energy_env *eenv,
-	const struct sched_group_energy * const sge)
+static int __find_new_capacity(unsigned long util,
+			       const struct sched_group_energy * const sge)
 {
 	int idx;
-	unsigned long util = group_max_util(eenv);
 
 	for (idx = 0; idx < sge->nr_cap_states; idx++) {
 		if (sge->cap_states[idx].cap >= util)
 			break;
 	}
 
+	return idx;
+}
+
+static int find_new_capacity(struct energy_env *eenv,
+			     const struct sched_group_energy * const sge)
+{
+	int idx;
+	unsigned long util = group_max_util(eenv);
+
+	idx = __find_new_capacity(util, sge);
 	eenv->cap_idx = idx;
 
 	return idx;
@@ -5491,7 +5572,8 @@
 static int sched_group_energy(struct energy_env *eenv)
 {
 	struct sched_domain *sd;
-	int cpu, total_energy = 0;
+	int cpu;
+	u64 total_energy = 0;
 	struct cpumask visit_cpus;
 	struct sched_group *sg;
 
@@ -5557,14 +5639,23 @@
 
 				idle_idx = group_idle_state(sg);
 				group_util = group_norm_util(eenv, sg);
-				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
-								>> SCHED_CAPACITY_SHIFT;
-				sg_idle_energy = ((SCHED_CAPACITY_SCALE-group_util)
-								* sg->sge->idle_states[idle_idx].power)
-								>> SCHED_CAPACITY_SHIFT;
+				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
+
+				if (idle_idx == 0)
+					sg_idle_energy = ((SCHED_CAPACITY_SCALE - group_util)
+							* sg->sge->cap_states[cap_idx].power);
+				else
+					sg_idle_energy = ((SCHED_CAPACITY_SCALE - group_util)
+							* sg->sge->idle_states[idle_idx].power);
 
 				total_energy += sg_busy_energy + sg_idle_energy;
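+				/*
+				 * Note: the busy/idle terms are accumulated
+				 * at full (util * power) resolution; the
+				 * single shift by SCHED_CAPACITY_SHIFT is
+				 * applied to the u64 total when eenv->energy
+				 * is set at the end, avoiding per-group
+				 * truncation.  When the group is not in a
+				 * real idle state (idle_idx == 0), the idle
+				 * residual is charged at the active OPP
+				 * power instead.
+				 */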
 
+				trace_sched_group_energy(group_first_cpu(sg),
+					group_util, total_energy,
+					sg_busy_energy, sg_idle_energy,
+					idle_idx,
+					sg->sge->cap_states[eenv->cap_idx].cap);
+
 				if (!sd->child)
 					cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
 
@@ -5578,7 +5669,7 @@
 		continue;
 	}
 
-	eenv->energy = total_energy;
+	eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
 	return 0;
 }
 
@@ -5606,6 +5697,8 @@
 		.dst_cpu	= eenv->dst_cpu,
 		.nrg		= { 0, 0, 0, 0},
 		.cap		= { 0, 0, 0 },
+		.task		= eenv->task,
+		.sync_cpu       = eenv->sync_cpu,
 	};
 
 	if (eenv->src_cpu == eenv->dst_cpu)
@@ -5823,38 +5916,33 @@
 	return 1;
 }
 
-static inline int task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
-		unsigned long demand = p->ravg.demand;
-		return (demand << 10) / walt_ravg_window;
-	}
-#endif
-	return p->se.avg.util_avg;
-}
-
 static inline unsigned long boosted_task_util(struct task_struct *task);
 
 static inline bool __task_fits(struct task_struct *p, int cpu, int util)
 {
-	unsigned long capacity = capacity_of(cpu);
+	unsigned int margin;
 
 	util += boosted_task_util(p);
 
-	return (capacity * 1024) > (util * capacity_margin);
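+	/*
+	 * A different margin (capacity_margin_down) is applied when the
+	 * candidate CPU has lower original capacity than the task's current
+	 * CPU, i.e. when this check concerns a potential down-migration;
+	 * presumably this provides hysteresis between up- and down-migration
+	 * decisions.
+	 */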
+	if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
+		margin = capacity_margin_down;
+	else
+		margin = capacity_margin;
+
+	return (capacity_orig_of(cpu) * 1024) > (util * margin);
 }
 
 static inline bool task_fits_max(struct task_struct *p, int cpu)
 {
-	unsigned long capacity = capacity_of(cpu);
+	unsigned long capacity = capacity_orig_of(cpu);
 	unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
 
 	if (capacity == max_capacity)
 		return true;
 
-	if (capacity * capacity_margin > max_capacity * 1024)
-		return true;
+	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
+					task_sched_boost(p))
+		return false;
 
 	return __task_fits(p, cpu, 0);
 }
@@ -5864,9 +5952,15 @@
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
-static bool cpu_overutilized(int cpu)
+static bool __cpu_overutilized(int cpu, int delta)
 {
-	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+	return (capacity_orig_of(cpu) * 1024) <
+	       ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
+bool cpu_overutilized(int cpu)
+{
+	return __cpu_overutilized(cpu, 0);
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -5967,7 +6061,7 @@
 static inline unsigned long
 boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util(cpu);
+	unsigned long util = cpu_util_freq(cpu, NULL);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
@@ -6248,7 +6342,7 @@
 				idle = false;
 		}
 
-		if (idle)
+		if (!cpu_isolated(cpu) && idle)
 			return core;
 	}
 
@@ -6273,6 +6367,8 @@
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
+		if (cpu_isolated(cpu))
+			continue;
 		if (idle_cpu(cpu))
 			return cpu;
 	}
@@ -6325,6 +6421,8 @@
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
+		if (cpu_isolated(cpu))
+			continue;
 		if (idle_cpu(cpu))
 			break;
 	}
@@ -6350,13 +6448,14 @@
 	int best_idle_capacity = INT_MAX;
 
 	if (!sysctl_sched_cstate_aware) {
-		if (idle_cpu(target))
+		if (idle_cpu(target) && !cpu_isolated(target))
 			return target;
 
 		/*
 		 * If the prevous cpu is cache affine and idle, don't be stupid.
 		 */
-		if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+		if (i != target && cpus_share_cache(i, target) &&
+				idle_cpu(i) && !cpu_isolated(i))
 			return i;
 
 		sd = rcu_dereference(per_cpu(sd_llc, target));
@@ -6394,6 +6493,10 @@
 					int idle_idx = idle_get_state_idx(rq);
 					unsigned long new_usage = boosted_task_util(p);
 					unsigned long capacity_orig = capacity_orig_of(i);
+
+					if (cpu_isolated(i))
+						continue;
+
 					if (new_usage > capacity_orig || !idle_cpu(i))
 						goto next;
 
@@ -6408,6 +6511,9 @@
 				}
 			} else {
 				for_each_cpu(i, sched_group_cpus(sg)) {
+					if (cpu_isolated(i))
+						continue;
+
 					if (i == target || !idle_cpu(i))
 						goto next;
 				}
@@ -6468,7 +6574,7 @@
 			continue;
 
 #ifdef CONFIG_SCHED_WALT
-		if (walt_cpu_high_irqload(i))
+		if (sched_cpu_high_irqload(i))
 			continue;
 #endif
 		/*
@@ -6528,22 +6634,107 @@
 	return target_cpu;
 }
 
+/*
+ * Should task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications for both performance
+ * and power. In many cases the scheduler cannot correctly estimate the
+ * impact of using idle cpus on either. PF_WAKE_UP_IDLE allows an external
+ * kernel module to pass a strong hint to the scheduler that the task in
+ * question should be woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+	return (current->flags & PF_WAKE_UP_IDLE) ||
+		 (p->flags & PF_WAKE_UP_IDLE);
+}
+
+static bool
+is_packing_eligible(struct task_struct *p, unsigned long task_util,
+		    struct sched_group *sg_target,
+		    unsigned long target_cpu_new_util_cum,
+		    int targeted_cpus)
+{
+	int cpu_cap_idx_pack, cpu_cap_idx_spread, cap_idx0, cap_idx1;
+
+	if (targeted_cpus > 1)
+		/*
+		 * More than one CPU was evaluated and target_cpu is the
+		 * least loaded among them, so placing the task there won't
+		 * raise the OPP.
+		 */
+		return true;
+
+	/*
+	 * There is only one CPU out of C-state.
+	 *
+	 * cpu_cap_idx_pack contains estimated OPP index of target_cpu when we
+	 * pack the new task onto the target_cpu.
+	 * cap_idx0 and cap_idx1 contain OPP indices of two CPUs, one for
+	 * target_cpu without the new task's load and the other for a new
+	 * idle CPU with the task's load.
+	 *
+	 *   Pack :                       Spread :
+	 *  cap_idx_pack is new OPP.     max(cap_idx0, cap_idx1) is new OPP.
+	 *  ________________             ________________
+	 *  |              |             |              | ______________
+	 *  | cap_idx_pack |             |   cap_idx0   | |  cap_idx1  |
+	 *  | (target_cpu) |             | (target_cpu) | | (idle cpu) |
+	 *  ----------------             ---------------- --------------
+	 *
+	 * The target_cpu's current capacity can be much higher than its
+	 * current utilization, for example due to hysteresis during task
+	 * migration.  In that case, packing onto the target_cpu based on
+	 * current capacity would forgo the chance to lower the OPP and
+	 * would make target_cpu keep the higher OPP longer than spreading
+	 * would.
+	 *
+	 * Try task packing only when it won't keep the current OPP longer
+	 * than spreading the load would.
+	 */
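+	/*
+	 * Illustration with a hypothetical three-OPP capacity table
+	 * {400, 600, 1024}: a cumulative util of 700 that includes a
+	 * 300-util task packs at the 1024-capacity OPP, while spreading
+	 * needs only max(idx(400), idx(300)), i.e. the 400-capacity OPP,
+	 * so packing is rejected.  With a cumulative util of 350 both
+	 * choices resolve to the lowest OPP and packing is allowed.
+	 */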
+
+	cpu_cap_idx_pack = __find_new_capacity(target_cpu_new_util_cum,
+					       sg_target->sge);
+
+	cap_idx0 = __find_new_capacity(target_cpu_new_util_cum - task_util,
+				       sg_target->sge);
+	cap_idx1 = __find_new_capacity(task_util, sg_target->sge);
+
+	cpu_cap_idx_spread = max(cap_idx0, cap_idx1);
+
+	trace_sched_energy_diff_packing(p, task_util, targeted_cpus,
+					cpu_cap_idx_pack, cpu_cap_idx_spread);
+
+	return cpu_cap_idx_pack == cpu_cap_idx_spread;
+}
+
 static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 {
 	struct sched_domain *sd;
 	struct sched_group *sg, *sg_target;
 	int target_max_cap = INT_MAX;
-	int target_cpu = task_cpu(p);
-	unsigned long task_util_boosted, new_util;
+	int target_cpu, targeted_cpus = 0;
+	unsigned long task_util_boosted = 0, curr_util = 0;
+	long new_util, new_util_cum;
 	int i;
-
-	if (sysctl_sched_sync_hint_enable && sync) {
-		int cpu = smp_processor_id();
-		cpumask_t search_cpus;
-		cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
-		if (cpumask_test_cpu(cpu, &search_cpus))
-			return cpu;
-	}
+	int ediff = -1;
+	int cpu = smp_processor_id();
+	int min_util_cpu = -1;
+	int min_util_cpu_idle_idx = INT_MAX;
+	long min_util_cpu_util_cum = LONG_MAX;
+	unsigned int min_util = UINT_MAX;
+	int cpu_idle_idx;
+	int min_idle_idx_cpu;
+	int min_idle_idx = INT_MAX;
+	bool safe_to_pack = false;
+	unsigned int target_cpu_util = UINT_MAX;
+	long target_cpu_new_util_cum = LONG_MAX;
+	struct cpumask *rtg_target = NULL;
+	bool wake_on_sibling = false;
+	int isolated_candidate = -1;
+	bool need_idle;
+	bool skip_ediff = false;
+	enum sched_boost_policy placement_boost = task_sched_boost(p) ?
+				sched_boost_policy() : SCHED_BOOST_NONE;
 
 	sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
 
@@ -6553,7 +6744,31 @@
 	sg = sd->groups;
 	sg_target = sg;
 
+	sync = sync && sysctl_sched_sync_hint_enable;
+	curr_util = boosted_task_util(cpu_rq(cpu)->curr);
+
+	need_idle = wake_to_idle(p);
+
 	if (sysctl_sched_is_big_little) {
+		struct related_thread_group *grp;
+
+		rcu_read_lock();
+		grp = task_related_thread_group(p);
+		rcu_read_unlock();
+
+		if (grp && grp->preferred_cluster) {
+			rtg_target = &grp->preferred_cluster->cpus;
+		} else if (sync && wake_on_waker_sibling(p)) {
+			if (bias_to_waker_cpu(p, cpu)) {
+				trace_sched_task_util_bias_to_waker(p,
+						task_cpu(p), task_util(p), cpu,
+						cpu, 0, need_idle);
+				return cpu;
+			}
+			wake_on_sibling = true;
+		}
+
+		task_util_boosted = boosted_task_util(p);
 
 		/*
 		 * Find group with sufficient capacity. We only get here if no cpu is
@@ -6563,24 +6778,96 @@
 		 * point.
 		 */
 		do {
+			int max_cap_cpu;
+			cpumask_t avail_cpus;
+
+			/* Are all CPUs isolated in this group? */
+			if (unlikely(!sg->group_weight))
+				continue;
+
+			/* Can this task run on any CPUs of this group? */
+			cpumask_and(&avail_cpus, sched_group_cpus(sg),
+							tsk_cpus_allowed(p));
+			cpumask_andnot(&avail_cpus, &avail_cpus,
+							cpu_isolated_mask);
+			if (cpumask_empty(&avail_cpus))
+				continue;
+
 			/* Assuming all cpus are the same in group */
-			int max_cap_cpu = group_first_cpu(sg);
+			max_cap_cpu = group_first_cpu(sg);
 
 			/*
 			 * Assume smaller max capacity means more energy-efficient.
 			 * Ideally we should query the energy model for the right
 			 * answer but it easily ends up in an exhaustive search.
 			 */
-			if (capacity_of(max_cap_cpu) < target_max_cap &&
+			if (capacity_orig_of(max_cap_cpu) < target_max_cap &&
 			    task_fits_max(p, max_cap_cpu)) {
 				sg_target = sg;
+
+				if (rtg_target) {
+					/*
+					 * For tasks that belong to a related
+					 * thread group, select the preferred
+					 * cluster if the task can fit there,
+					 * otherwise select the cluster which
+					 * can fit the task.
+					 */
+					if (cpumask_test_cpu(max_cap_cpu,
+							     rtg_target))
+						break;
+					continue;
+				} else if (wake_on_sibling) {
+					/* Skip non-sibling CPUs */
+					if (!cpumask_test_cpu(cpu,
+							sched_group_cpus(sg)))
+						continue;
+				} else if (sync && curr_util >=
+						   task_util_boosted) {
+					if (cpumask_test_cpu(cpu,
+							sched_group_cpus(sg))) {
+						if (!cpumask_test_cpu(task_cpu(p),
+								      sched_group_cpus(sg)))
+							skip_ediff = true;
+						break;
+					}
+					continue;
+				}
+
 				target_max_cap = capacity_of(max_cap_cpu);
 			}
 		} while (sg = sg->next, sg != sd->groups);
 
-		task_util_boosted = boosted_task_util(p);
+		target_cpu = -1;
+
 		/* Find cpu with sufficient capacity */
 		for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
+			if (cpu_isolated(i))
+				continue;
+
+			if (isolated_candidate == -1)
+				isolated_candidate = i;
+
+			if (is_reserved(i))
+				continue;
+
+			if (sched_cpu_high_irqload(i))
+				continue;
+
+			/*
+			 * Since this code is inside sched_is_big_little,
+			 * we are going to assume that boost policy is
+			 * SCHED_BOOST_ON_BIG.
+			 */
+			if (placement_boost != SCHED_BOOST_NONE) {
+				new_util = cpu_util(i);
+				if (new_util < min_util) {
+					min_util_cpu = i;
+					min_util = new_util;
+				}
+				continue;
+			}
+
 			/*
 			 * p's blocked utilization is still accounted for on prev_cpu
 			 * so prev_cpu will receive a negative bias due to the double
@@ -6588,6 +6875,19 @@
 			 */
 			new_util = cpu_util(i) + task_util_boosted;
 
+			if (task_in_cum_window_demand(cpu_rq(i), p))
+				new_util_cum = cpu_util_cum(i, 0) +
+					       task_util_boosted - task_util(p);
+			else
+				new_util_cum = cpu_util_cum(i, 0) +
+					       task_util_boosted;
+
+			if (sync && i == cpu)
+				new_util -= curr_util;
+
+			trace_sched_cpu_util(p, i, task_util_boosted, curr_util,
+					     new_util_cum, sync);
+
 			/*
 			 * Ensure minimum capacity to grant the required boost.
 			 * The target CPU can be already at a capacity level higher
@@ -6596,15 +6896,96 @@
 			if (new_util > capacity_orig_of(i))
 				continue;
 
-			if (new_util < capacity_curr_of(i)) {
-				target_cpu = i;
-				if (cpu_rq(i)->nr_running)
+			cpu_idle_idx = cpu_rq(i)->nr_running ? -1 :
+				       idle_get_state_idx(cpu_rq(i));
+
+			if (!need_idle &&
+			    (!wake_on_sibling ||
+			     (wake_on_sibling && i != cpu)) &&
+			    add_capacity_margin(new_util_cum) <
+			    capacity_curr_of(i)) {
+				if (sysctl_sched_cstate_aware) {
+					if (cpu_idle_idx < min_idle_idx) {
+						min_idle_idx = cpu_idle_idx;
+						min_idle_idx_cpu = i;
+						target_cpu = i;
+						target_cpu_util = new_util;
+						target_cpu_new_util_cum =
+						    new_util_cum;
+						targeted_cpus = 1;
+					} else if (cpu_idle_idx ==
+						   min_idle_idx &&
+						   (target_cpu_util >
+						    new_util ||
+						    (target_cpu_util ==
+						     new_util &&
+						     (i == task_cpu(p) ||
+						      (target_cpu !=
+						       task_cpu(p) &&
+						       target_cpu_new_util_cum >
+						       new_util_cum))))) {
+						min_idle_idx_cpu = i;
+						target_cpu = i;
+						target_cpu_util = new_util;
+						target_cpu_new_util_cum =
+						    new_util_cum;
+						targeted_cpus++;
+					}
+				} else if (cpu_rq(i)->nr_running) {
+					target_cpu = i;
 					break;
+				}
+			} else if (!need_idle &&
+				   (!wake_on_sibling ||
+				    (wake_on_sibling && i != cpu))) {
+				/*
+				 * At least one CPU other than target_cpu is
+				 * going to raise its OPP above the current
+				 * one, because its util is already more than
+				 * the current capacity plus the margin.  So
+				 * task packing can be done safely, without
+				 * worrying that packing itself raises the OPP.
+				 */
+				safe_to_pack = true;
 			}
 
-			/* cpu has capacity at higher OPP, keep it as fallback */
-			if (target_cpu == task_cpu(p))
-				target_cpu = i;
+			/*
+			 * cpu has capacity at higher OPP, keep it as
+			 * fallback.
+			 */
+			if (new_util < min_util) {
+				min_util_cpu = i;
+				min_util = new_util;
+				min_util_cpu_idle_idx = cpu_idle_idx;
+				min_util_cpu_util_cum = new_util_cum;
+			} else if (sysctl_sched_cstate_aware &&
+				   min_util == new_util) {
+				if (min_util_cpu == task_cpu(p))
+					continue;
+
+				if (i == task_cpu(p) ||
+				    (cpu_idle_idx < min_util_cpu_idle_idx ||
+				     (cpu_idle_idx == min_util_cpu_idle_idx &&
+				      min_util_cpu_util_cum > new_util_cum))) {
+					min_util_cpu = i;
+					min_util_cpu_idle_idx = cpu_idle_idx;
+					min_util_cpu_util_cum = new_util_cum;
+				}
+			}
+		}
+
+		if (target_cpu == -1 ||
+		    (target_cpu != min_util_cpu && !safe_to_pack &&
+		     !is_packing_eligible(p, task_util_boosted, sg_target,
+					  target_cpu_new_util_cum,
+					  targeted_cpus))) {
+			if (likely(min_util_cpu != -1))
+				target_cpu = min_util_cpu;
+			else if (cpu_isolated(task_cpu(p)) &&
+					isolated_candidate != -1)
+				target_cpu = isolated_candidate;
+			else
+				target_cpu = task_cpu(p);
 		}
 	} else {
 		/*
@@ -6618,6 +6999,8 @@
 		bool prefer_idle = 0;
 #endif
 		int tmp_target = find_best_target(p, boosted, prefer_idle);
+
+		target_cpu = task_cpu(p);
 		if (tmp_target >= 0) {
 			target_cpu = tmp_target;
 			if ((boosted || prefer_idle) && idle_cpu(target_cpu))
@@ -6625,22 +7008,73 @@
 		}
 	}
 
-	if (target_cpu != task_cpu(p)) {
+	if (wake_on_sibling && target_cpu != -1) {
+		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
+						task_util(p), target_cpu,
+						target_cpu, 0, need_idle);
+		return target_cpu;
+	}
+
+	if (target_cpu != task_cpu(p) && !cpu_isolated(task_cpu(p))) {
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
 			.src_cpu	= task_cpu(p),
 			.dst_cpu	= target_cpu,
 			.task		= p,
+			.sync_cpu	= sync ? smp_processor_id() : -1,
+			.curr_util	= curr_util,
 		};
 
-		/* Not enough spare capacity on previous cpu */
-		if (cpu_overutilized(task_cpu(p)))
+		/*
+		 * We always want to migrate the task to the preferred cluster.
+		 */
+		if (rtg_target) {
+			trace_sched_task_util_colocated(p, task_cpu(p),
+						task_util(p),
+						cpumask_first(rtg_target),
+						target_cpu, 0, need_idle);
 			return target_cpu;
+		}
 
-		if (energy_diff(&eenv) >= 0)
-			return task_cpu(p);
+#ifdef CONFIG_SCHED_WALT
+		if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+			task_util_boosted = 0;
+#else
+		task_util_boosted = 0;
+#endif
+		/* Not enough spare capacity on previous cpu */
+		if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+			trace_sched_task_util_overutilzed(p, task_cpu(p),
+						task_util(p), target_cpu,
+						target_cpu, 0, need_idle);
+			return target_cpu;
+		}
+
+		if (!skip_ediff)
+			ediff = energy_diff(&eenv);
+
+		if (!sysctl_sched_cstate_aware) {
+			if (ediff >= 0) {
+				trace_sched_task_util_energy_diff(p,
+						task_cpu(p), task_util(p),
+						target_cpu, task_cpu(p), ediff,
+						need_idle);
+				return task_cpu(p);
+			}
+		} else {
+			if (ediff > 0) {
+				trace_sched_task_util_energy_diff(p,
+						task_cpu(p), task_util(p),
+						target_cpu, task_cpu(p), ediff,
+						need_idle);
+				return task_cpu(p);
+			}
+		}
 	}
 
+	trace_sched_task_util_energy_aware(p, task_cpu(p), task_util(p),
+					   target_cpu, target_cpu, ediff,
+					   need_idle);
 	return target_cpu;
 }
 
@@ -6669,6 +7103,9 @@
 	return select_best_cpu(p, prev_cpu, 0, sync);
 #endif
 
+	if (energy_aware())
+		return energy_aware_wake_cpu(p, prev_cpu, sync);
+
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
@@ -7332,8 +7769,11 @@
 	unsigned int		loop_max;
 
 	enum fbq_type		fbq_type;
+	enum group_type		busiest_group_type;
 	struct list_head	tasks;
+#ifdef CONFIG_SCHED_HMP
 	enum sched_boost_policy	boost_policy;
+#endif
 };
 
 /*
@@ -7431,7 +7871,9 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+#ifdef CONFIG_SCHED_HMP
 	int twf, group_cpus;
+#endif
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -7475,9 +7917,34 @@
 		return 0;
 	}
 
+	if (energy_aware() && !env->dst_rq->rd->overutilized &&
+	    env->idle == CPU_NEWLY_IDLE) {
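+		/*
+		 * With EAS and no over-utilization, allow this newly-idle
+		 * pull only if it does not leave the destination CPU with
+		 * more cumulative utilization than the source CPU retains.
+		 */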
+		long util_cum_dst, util_cum_src;
+		unsigned long demand;
+
+		demand = task_util(p);
+		util_cum_dst = cpu_util_cum(env->dst_cpu, 0) + demand;
+		util_cum_src = cpu_util_cum(env->src_cpu, 0) - demand;
+
+		if (util_cum_dst > util_cum_src)
+			return 0;
+	}
+
 	/* Record that we found atleast one task that could run on dst_cpu */
 	env->flags &= ~LBF_ALL_PINNED;
 
+#ifdef CONFIG_SCHED_WALT
+	if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+			 !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
+		return 0;
+
+	/* Don't detach task if it doesn't fit on the destination */
+	if (env->flags & LBF_IGNORE_BIG_TASKS &&
+		!task_fits_max(p, env->dst_cpu))
+		return 0;
+#endif
+
+#ifdef CONFIG_SCHED_HMP
 	if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
 		if (nr_big_tasks(env->src_rq) && !is_big_task(p))
 			return 0;
@@ -7496,10 +7963,6 @@
 	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
 		return 0;
 
-	if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
-	    !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p))
-		return 0;
-
 	/*
 	 * Group imbalance can sometimes cause work to be pulled across groups
 	 * even though the group could have managed the imbalance on its own.
@@ -7510,6 +7973,7 @@
 						 SCHED_CAPACITY_SCALE);
 	if (!twf && env->busiest_nr_running <= group_cpus)
 		return 0;
+#endif
 
 	if (task_running(env->src_rq, p)) {
 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
@@ -7977,6 +8441,8 @@
 	int max_cap_cpu;
 	unsigned long flags;
 
+	capacity = min(capacity, thermal_cap(cpu));
+
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
 	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
@@ -7998,6 +8464,8 @@
 	raw_spin_unlock_irqrestore(&mcc->lock, flags);
 
 skip_unlock: __attribute__ ((unused));
+	sdg->sgc->max_capacity = capacity;
+
 	capacity *= scale_rt_capacity(cpu);
 	capacity >>= SCHED_CAPACITY_SHIFT;
 
@@ -8006,7 +8474,6 @@
 
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
-	sdg->sgc->max_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
@@ -8176,6 +8643,17 @@
 	return false;
 }
 
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+	return sg->sgc->max_capacity < ref->sgc->max_capacity;
+}
+
 static inline enum
 group_type group_classify(struct sched_group *group,
 			  struct sg_lb_stats *sgs)
@@ -8253,11 +8731,11 @@
 		if (!nr_running && idle_cpu(i))
 			sgs->idle_cpus++;
 
-		if (cpu_overutilized(i)) {
+		if (cpu_overutilized(i))
 			*overutilized = true;
-			if (!sgs->group_misfit_task && rq->misfit_task)
-				sgs->group_misfit_task = capacity_of(i);
-		}
+
+		if (!sgs->group_misfit_task && rq->misfit_task)
+			sgs->group_misfit_task = capacity_of(i);
 	}
 
 	/* Isolated CPU has no weight */
@@ -8312,9 +8790,25 @@
 	if (sgs->group_type < busiest->group_type)
 		return false;
 
+	/*
+	 * Candidate sg doesn't face any serious load-balance problems
+	 * so don't pick it if the local sg is already filled up.
+	 */
+	if (sgs->group_type == group_other &&
+	    !group_has_capacity(env, &sds->local_stat))
+		return false;
+
 	if (sgs->avg_load <= busiest->avg_load)
 		return false;
 
+	/*
+	 * Candidate sg has no more than one task per cpu and has higher
+	 * per-cpu capacity. No reason to pull tasks to less capable cpus.
+	 */
+	if (sgs->sum_nr_running <= sgs->group_weight &&
+	    group_smaller_cpu_capacity(sds->local, sg))
+		return false;
+
 	/* This is the busiest node in its class. */
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return true;
@@ -8424,6 +8918,15 @@
 			sgs->group_type = group_classify(sg, sgs);
 		}
 
+		/*
+		 * Ignore task groups with misfit tasks if local group has no
+		 * capacity or if per-cpu capacity isn't higher.
+		 */
+		if (sgs->group_type == group_misfit_task && sds->local &&
+		    (!group_has_capacity(env, &sds->local_stat) ||
+		     !group_smaller_cpu_capacity(sg, sds->local)))
+			sgs->group_type = group_other;
+
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
@@ -8611,6 +9114,22 @@
 	 */
 	if (busiest->avg_load <= sds->avg_load ||
 	    local->avg_load >= sds->avg_load) {
+		/* Misfitting tasks should be migrated in any case */
+		if (busiest->group_type == group_misfit_task) {
+			env->imbalance = busiest->group_misfit_task;
+			return;
+		}
+
+		/*
+		 * Busiest group is overloaded, local is not, use the spare
+		 * cycles to maximize throughput
+		 */
+		if (busiest->group_type == group_overloaded &&
+		    local->group_type <= group_misfit_task) {
+			env->imbalance = busiest->load_per_task;
+			return;
+		}
+
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
@@ -8644,6 +9163,11 @@
 		(sds->avg_load - local->avg_load) * local->group_capacity
 	) / SCHED_CAPACITY_SCALE;
 
+	/* Boost imbalance to allow misfit task to be balanced. */
+	if (busiest->group_type == group_misfit_task)
+		env->imbalance = max_t(long, env->imbalance,
+				     busiest->group_misfit_task);
+
 	/*
 	 * if *imbalance is less than the average load per runnable task
 	 * there is no guarantee that any tasks will be moved so we'll have
@@ -8680,8 +9204,34 @@
 	 */
 	update_sd_lb_stats(env, &sds);
 
-	if (energy_aware() && !env->dst_rq->rd->overutilized)
-		goto out_balanced;
+	if (energy_aware() && !env->dst_rq->rd->overutilized) {
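+		/*
+		 * With EAS and no over-utilization, only allow newly-idle
+		 * balancing, and only from a group whose CPUs have equal or
+		 * higher original capacity; when capacities are equal, the
+		 * busiest group must have at least two runnable tasks and at
+		 * least as much cumulative utilization as the local group.
+		 */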
+		int cpu_local, cpu_busiest;
+		long util_cum;
+		unsigned long capacity_local, capacity_busiest;
+
+		if (env->idle != CPU_NEWLY_IDLE)
+			goto out_balanced;
+
+		if (!sds.local || !sds.busiest)
+			goto out_balanced;
+
+		cpu_local = group_first_cpu(sds.local);
+		cpu_busiest = group_first_cpu(sds.busiest);
+
+		 /* TODO: don't assume same cap cpus are in same domain */
+		capacity_local = capacity_orig_of(cpu_local);
+		capacity_busiest = capacity_orig_of(cpu_busiest);
+		if (capacity_local > capacity_busiest) {
+			goto out_balanced;
+		} else if (capacity_local == capacity_busiest) {
+			if (cpu_rq(cpu_busiest)->nr_running < 2)
+				goto out_balanced;
+
+			util_cum = cpu_util_cum(cpu_busiest, 0);
+			if (util_cum < cpu_util_cum(cpu_local, 0))
+				goto out_balanced;
+		}
+	}
 
 	local = &sds.local_stat;
 	busiest = &sds.busiest_stat;
@@ -8716,6 +9266,11 @@
 	    busiest->group_no_capacity)
 		goto force_balance;
 
+	/* Misfitting tasks should be dealt with regardless of the avg load */
+	if (busiest->group_type == group_misfit_task) {
+		goto force_balance;
+	}
+
 	/*
 	 * If the local group is busier than the selected busiest group
 	 * don't try and pull any tasks.
@@ -8739,7 +9294,8 @@
 		 * might end up to just move the imbalance on another group
 		 */
 		if ((busiest->group_type != group_overloaded) &&
-				(local->idle_cpus <= (busiest->idle_cpus + 1)))
+		    (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+		    !group_smaller_cpu_capacity(sds.busiest, sds.local))
 			goto out_balanced;
 	} else {
 		/*
@@ -8752,6 +9308,7 @@
 	}
 
 force_balance:
+	env->busiest_group_type = busiest->group_type;
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
 	return sds.busiest;
@@ -8814,10 +9371,29 @@
 		 */
 
 		if (rq->nr_running == 1 && wl > env->imbalance &&
-		    !check_cpu_capacity(rq, env->sd))
+		    !check_cpu_capacity(rq, env->sd) &&
+		    env->busiest_group_type != group_misfit_task)
 			continue;
 
 		/*
+		 * With energy-aware scheduling enabled, migrating a misfit
+		 * task takes priority over pulling from the most loaded CPU.
+		 * E.g. if one CPU runs a single misfit task while other CPUs
+		 * run several lower-load tasks, we should first make sure
+		 * the misfit task can be migrated onto a higher-capacity CPU.
+		 */
+		if (energy_aware() &&
+		    capacity_orig_of(i) < capacity_orig_of(env->dst_cpu) &&
+		    rq->misfit_task &&
+		    env->busiest_group_type == group_misfit_task) {
+			busiest_load = wl;
+			busiest_capacity = capacity;
+			busiest = rq;
+			break;
+		}
+
+		/*
 		 * For the load comparisons with the other cpu's, consider
 		 * the weighted_cpuload() scaled with the cpu capacity, so
 		 * that the load can be moved away from the cpu that is
@@ -8876,12 +9452,10 @@
 			return 1;
 	}
 
-	if (energy_aware() && (capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
-				env->src_rq->cfs.h_nr_running == 1 &&
-				cpu_overutilized(env->src_cpu) &&
-				!cpu_overutilized(env->dst_cpu)) {
-			return 1;
-	}
+	if ((env->idle != CPU_NOT_IDLE) &&
+	    (capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu)) &&
+	    env->src_rq->misfit_task)
+		return 1;
 
 	return unlikely(sd->nr_balance_failed >
 			sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
@@ -8963,7 +9537,9 @@
 		.loop			= 0,
 		.busiest_nr_running     = 0,
 		.busiest_grp_capacity   = 0,
+#ifdef CONFIG_SCHED_HMP
 		.boost_policy		= sched_boost_policy(),
+#endif
 	};
 
 	/*
@@ -9419,7 +9995,9 @@
 		.busiest_grp_capacity	= 0,
 		.flags			= 0,
 		.loop			= 0,
+#ifdef CONFIG_SCHED_HMP
 		.boost_policy		= sched_boost_policy(),
+#endif
 	};
 	bool moved = false;
 
@@ -9536,12 +10114,33 @@
 
 static inline int find_new_ilb(int type)
 {
-	int ilb = cpumask_first(nohz.idle_cpus_mask);
+	int ilb = nr_cpu_ids;
+	struct sched_domain *sd;
+	int cpu = raw_smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	cpumask_t cpumask;
 
 #ifdef CONFIG_SCHED_HMP
 	return find_new_hmp_ilb(type);
 #endif
 
+	rcu_read_lock();
+	sd = rcu_dereference_check_sched_domain(rq->sd);
+	if (sd) {
+		cpumask_and(&cpumask, nohz.idle_cpus_mask,
+			    sched_domain_span(sd));
+		ilb = cpumask_first(&cpumask);
+	}
+	rcu_read_unlock();
+
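+	/*
+	 * Prefer an idle CPU from this CPU's own sched domain.  Fall back
+	 * to any idle CPU in the system only when EAS is disabled, or when
+	 * this CPU is already at the maximum capacity in the system or is
+	 * overutilized, presumably to avoid waking a CPU in another cluster
+	 * just to run the idle balancer.
+	 */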
+	if (sd && (ilb >= nr_cpu_ids || !idle_cpu(ilb))) {
+		if (!energy_aware() ||
+		    (capacity_orig_of(cpu) ==
+		     cpu_rq(cpu)->rd->max_cpu_capacity.val ||
+		     cpu_overutilized(cpu)))
+			ilb = cpumask_first(nohz.idle_cpus_mask);
+	}
+
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
 		return ilb;
 
@@ -9892,9 +10491,8 @@
 	if (time_before(now, nohz.next_balance))
 		return false;
 
-	if (rq->nr_running >= 2 &&
-	    (!energy_aware() || cpu_overutilized(cpu)))
-		return true;
+	if (energy_aware())
+		return rq->nr_running >= 2 && cpu_overutilized(cpu);
 
 #ifndef CONFIG_SCHED_HMP
 	rcu_read_lock();
@@ -10004,6 +10602,10 @@
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
+#ifdef CONFIG_SMP
+	bool old_misfit = curr->misfit;
+	bool misfit;
+#endif
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -10019,7 +10621,13 @@
 		trace_sched_overutilized(true);
 	}
 
-	rq->misfit_task = !task_fits_max(curr, rq->cpu);
+	misfit = !task_fits_max(curr, rq->cpu);
+	rq->misfit_task = misfit;
+
+	if (old_misfit != misfit) {
+		adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
+		curr->misfit = misfit;
+	}
 #endif
 
 }
@@ -10481,7 +11089,7 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_change_group	= task_change_group_fair,
 #endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_fair,
 #endif
 };
@@ -10531,6 +11139,154 @@
 
 }
 
+/* WALT sched implementation begins here */
+
+#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+	return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
+	for (tg = container_of(&task_groups, struct task_group, list);	\
+		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+	struct task_group *tg;
+	struct cfs_rq *cfs_rq;
+
+	rcu_read_lock();
+
+	for_each_cfs_rq(cfs_rq, tg, cpu)
+		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+	rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+
+/* Add task's contribution to a cpu's HMP statistics */
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	/*
+	 * Although below check is not strictly required  (as
+	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
+	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
+	 * efficiency by short-circuiting for_each_sched_entity() loop when
+	 * sched_disable_window_stats
+	 */
+	if (sched_disable_window_stats)
+		return;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se) {
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+	}
+}
+
+#elif defined(CONFIG_SCHED_WALT)
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+#else
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+#endif
+
 /* QHMP/Zone sched implementation begins here */
 
 #ifdef CONFIG_SCHED_HMP
@@ -10584,21 +11340,6 @@
 	s64 highest_spare_capacity;
 };
 
-/*
- * Should task be woken to any available idle cpu?
- *
- * Waking tasks to idle cpu has mixed implications on both performance and
- * power. In many cases, scheduler can't estimate correctly impact of using idle
- * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel
- * module to pass a strong hint to scheduler that the task in question should be
- * woken to idle cpu, generally to improve performance.
- */
-static inline int wake_to_idle(struct task_struct *p)
-{
-	return (current->flags & PF_WAKE_UP_IDLE) ||
-		 (p->flags & PF_WAKE_UP_IDLE);
-}
-
 static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
 {
 	u64 total_load;
@@ -11222,128 +11963,6 @@
 	return target;
 }
 
-#ifdef CONFIG_CFS_BANDWIDTH
-
-static inline struct task_group *next_task_group(struct task_group *tg)
-{
-	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
-	return (&tg->list == &task_groups) ? NULL : tg;
-}
-
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
-	for (tg = container_of(&task_groups, struct task_group, list);	\
-		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
-{
-	struct task_group *tg;
-	struct cfs_rq *cfs_rq;
-
-	rcu_read_lock();
-
-	for_each_cfs_rq(cfs_rq, tg, cpu)
-		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
-	rcu_read_unlock();
-}
-
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-
-	/*
-	 * Although below check is not strictly required  (as
-	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
-	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
-	 * efficiency by short-circuiting for_each_sched_entity() loop when
-	 * sched_disable_window_stats
-	 */
-	if (sched_disable_window_stats)
-		return;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
-	if (!se)
-		inc_rq_hmp_stats(rq, p, change_cra);
-}
-
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-				       u32 new_task_load, u32 new_pred_demand)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-
-		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
-					      task_load_delta,
-					      pred_demand_delta);
-		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
-	if (!se) {
-		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
-					      task_load_delta,
-					      pred_demand_delta);
-		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-	}
-}
-
-static int task_will_be_throttled(struct task_struct *p);
-
-#else	/* CONFIG_CFS_BANDWIDTH */
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-			   u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-}
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
-	return 0;
-}
-
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
-{
-	inc_nr_big_task(&rq->hmp_stats, p);
-}
-
-#endif	/* CONFIG_CFS_BANDWIDTH */
-
 /*
  * Reset balance_interval at all sched_domain levels of given cpu, so that it
  * honors kick.
@@ -11411,26 +12030,6 @@
 	return 0;
 }
 
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
-	unsigned long flags;
-	int rc = 0;
-
-	/* Invoke active balance to force migrate currently running task */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->active_balance) {
-		rq->active_balance = 1;
-		rq->push_cpu = new_cpu;
-		get_task_struct(p);
-		rq->push_task = p;
-		rc = 1;
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return rc;
-}
-
 static DEFINE_RAW_SPINLOCK(migration_lock);
 
 /*
@@ -11706,4 +12305,30 @@
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 
+#elif defined(CONFIG_SCHED_WALT)
+
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int new_cpu;
+	int active_balance;
+	int cpu = task_cpu(p);
+
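+	/*
+	 * If the currently running task no longer fits this CPU, pick a
+	 * better CPU via the energy-aware wakeup path and use the stop-class
+	 * active load balancer to push the task there.
+	 */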
+	if (rq->misfit_task) {
+		if (rq->curr->state != TASK_RUNNING ||
+		    rq->curr->nr_cpus_allowed == 1)
+			return;
+
+		new_cpu = energy_aware_wake_cpu(p, cpu, 0);
+		if (new_cpu != cpu) {
+			active_balance = kick_active_balance(rq, p, new_cpu);
+			if (active_balance) {
+				mark_reserved(new_cpu);
+				stop_one_cpu_nowait(cpu,
+					active_load_balance_cpu_stop, rq,
+					&rq->active_balance_work);
+			}
+		}
+	}
+}
+
 #endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 4de373f..6c28298 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -19,53 +19,12 @@
 #include <linux/syscore_ops.h>
 
 #include "sched.h"
+#include "walt.h"
 
 #include <trace/events/sched.h>
 
 #define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
 
-const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
-		"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", "IRQ_UPDATE"};
-
-const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"};
-
-static ktime_t ktime_last;
-static bool sched_ktime_suspended;
-
-static bool use_cycle_counter;
-static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
-
-u64 sched_ktime_clock(void)
-{
-	if (unlikely(sched_ktime_suspended))
-		return ktime_to_ns(ktime_last);
-	return ktime_get_ns();
-}
-
-static void sched_resume(void)
-{
-	sched_ktime_suspended = false;
-}
-
-static int sched_suspend(void)
-{
-	ktime_last = ktime_get();
-	sched_ktime_suspended = true;
-	return 0;
-}
-
-static struct syscore_ops sched_syscore_ops = {
-	.resume	= sched_resume,
-	.suspend = sched_suspend
-};
-
-static int __init sched_init_ops(void)
-{
-	register_syscore_ops(&sched_syscore_ops);
-	return 0;
-}
-late_initcall(sched_init_ops);
-
 inline void clear_ed_task(struct task_struct *p, struct rq *rq)
 {
 	if (p == rq->ed_task)
@@ -222,428 +181,11 @@
 	return ret;
 }
 
-unsigned int max_possible_efficiency = 1;
-unsigned int min_possible_efficiency = UINT_MAX;
-
 unsigned long __weak arch_get_cpu_efficiency(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
-	int i;
-	int max_cap = 0, min_cap = INT_MAX;
-
-	for_each_online_cpu(i) {
-		max_cap = max(max_cap, cpu_capacity(i));
-		min_cap = min(min_cap, cpu_capacity(i));
-	}
-
-	max_capacity = max_cap;
-	min_capacity = min_cap;
-}
-
-static void update_min_max_capacity(void)
-{
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-	for_each_possible_cpu(i)
-		raw_spin_lock(&cpu_rq(i)->lock);
-
-	__update_min_max_capacity();
-
-	for_each_possible_cpu(i)
-		raw_spin_unlock(&cpu_rq(i)->lock);
-	local_irq_restore(flags);
-}
-
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long
-capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
-	return (1024 * cluster->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
-{
-	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static inline unsigned long
-load_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
-	return DIV_ROUND_UP(1024 * max_possible_efficiency,
-			    cluster->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
-{
-	return DIV_ROUND_UP(1024 * max_possible_freq,
-			   cluster_max_freq(cluster));
-}
-
-static int compute_capacity(struct sched_cluster *cluster)
-{
-	int capacity = 1024;
-
-	capacity *= capacity_scale_cpu_efficiency(cluster);
-	capacity >>= 10;
-
-	capacity *= capacity_scale_cpu_freq(cluster);
-	capacity >>= 10;
-
-	return capacity;
-}
-
-static int compute_max_possible_capacity(struct sched_cluster *cluster)
-{
-	int capacity = 1024;
-
-	capacity *= capacity_scale_cpu_efficiency(cluster);
-	capacity >>= 10;
-
-	capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
-	capacity >>= 10;
-
-	return capacity;
-}
-
-static int compute_load_scale_factor(struct sched_cluster *cluster)
-{
-	int load_scale = 1024;
-
-	/*
-	 * load_scale_factor accounts for the fact that task load
-	 * is in reference to "best" performing cpu. Task's load will need to be
-	 * scaled (up) by a factor to determine suitability to be placed on a
-	 * (little) cpu.
-	 */
-	load_scale *= load_scale_cpu_efficiency(cluster);
-	load_scale >>= 10;
-
-	load_scale *= load_scale_cpu_freq(cluster);
-	load_scale >>= 10;
-
-	return load_scale;
-}
-
-struct list_head cluster_head;
-static DEFINE_MUTEX(cluster_lock);
-static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
-DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
-struct sched_cluster *sched_cluster[NR_CPUS];
-int num_clusters;
-
-unsigned int max_power_cost = 1;
-
-struct sched_cluster init_cluster = {
-	.list			=	LIST_HEAD_INIT(init_cluster.list),
-	.id			=	0,
-	.max_power_cost		=	1,
-	.min_power_cost		=	1,
-	.capacity		=	1024,
-	.max_possible_capacity	=	1024,
-	.efficiency		=	1,
-	.load_scale_factor	=	1024,
-	.cur_freq		=	1,
-	.max_freq		=	1,
-	.max_mitigated_freq	=	UINT_MAX,
-	.min_freq		=	1,
-	.max_possible_freq	=	1,
-	.dstate			=	0,
-	.dstate_wakeup_energy	=	0,
-	.dstate_wakeup_latency	=	0,
-	.exec_scale_factor	=	1024,
-	.notifier_sent		=	0,
-	.wake_up_idle		=	0,
-};
-
-static void update_all_clusters_stats(void)
-{
-	struct sched_cluster *cluster;
-	u64 highest_mpc = 0, lowest_mpc = U64_MAX;
-
-	pre_big_task_count_change(cpu_possible_mask);
-
-	for_each_sched_cluster(cluster) {
-		u64 mpc;
-
-		cluster->capacity = compute_capacity(cluster);
-		mpc = cluster->max_possible_capacity =
-			compute_max_possible_capacity(cluster);
-		cluster->load_scale_factor = compute_load_scale_factor(cluster);
-
-		cluster->exec_scale_factor =
-			DIV_ROUND_UP(cluster->efficiency * 1024,
-				     max_possible_efficiency);
-
-		if (mpc > highest_mpc)
-			highest_mpc = mpc;
-
-		if (mpc < lowest_mpc)
-			lowest_mpc = mpc;
-	}
-
-	max_possible_capacity = highest_mpc;
-	min_max_possible_capacity = lowest_mpc;
-
-	__update_min_max_capacity();
-	sched_update_freq_max_load(cpu_possible_mask);
-	post_big_task_count_change(cpu_possible_mask);
-}
-
-static void assign_cluster_ids(struct list_head *head)
-{
-	struct sched_cluster *cluster;
-	int pos = 0;
-
-	list_for_each_entry(cluster, head, list) {
-		cluster->id = pos;
-		sched_cluster[pos++] = cluster;
-	}
-}
-
-static void
-move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
-{
-	struct list_head *first, *last;
-
-	first = src->next;
-	last = src->prev;
-
-	if (sync_rcu) {
-		INIT_LIST_HEAD_RCU(src);
-		synchronize_rcu();
-	}
-
-	first->prev = dst;
-	dst->prev = last;
-	last->next = dst;
-
-	/* Ensure list sanity before making the head visible to all CPUs. */
-	smp_mb();
-	dst->next = first;
-}
-
-static int
-compare_clusters(void *priv, struct list_head *a, struct list_head *b)
-{
-	struct sched_cluster *cluster1, *cluster2;
-	int ret;
-
-	cluster1 = container_of(a, struct sched_cluster, list);
-	cluster2 = container_of(b, struct sched_cluster, list);
-
-	/*
-	 * Don't assume higher capacity means higher power. If the
-	 * power cost is same, sort the higher capacity cluster before
-	 * the lower capacity cluster to start placing the tasks
-	 * on the higher capacity cluster.
-	 */
-	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
-		(cluster1->max_power_cost == cluster2->max_power_cost &&
-		cluster1->max_possible_capacity <
-				cluster2->max_possible_capacity);
-
-	return ret;
-}
-
-static void sort_clusters(void)
-{
-	struct sched_cluster *cluster;
-	struct list_head new_head;
-	unsigned int tmp_max = 1;
-
-	INIT_LIST_HEAD(&new_head);
-
-	for_each_sched_cluster(cluster) {
-		cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
-							       max_task_load());
-		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
-							       0);
-
-		if (cluster->max_power_cost > tmp_max)
-			tmp_max = cluster->max_power_cost;
-	}
-	max_power_cost = tmp_max;
-
-	move_list(&new_head, &cluster_head, true);
-
-	list_sort(NULL, &new_head, compare_clusters);
-	assign_cluster_ids(&new_head);
-
-	/*
-	 * Ensure cluster ids are visible to all CPUs before making
-	 * cluster_head visible.
-	 */
-	move_list(&cluster_head, &new_head, false);
-}
-
-static void
-insert_cluster(struct sched_cluster *cluster, struct list_head *head)
-{
-	struct sched_cluster *tmp;
-	struct list_head *iter = head;
-
-	list_for_each_entry(tmp, head, list) {
-		if (cluster->max_power_cost < tmp->max_power_cost)
-			break;
-		iter = &tmp->list;
-	}
-
-	list_add(&cluster->list, iter);
-}
-
-static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
-{
-	struct sched_cluster *cluster = NULL;
-
-	cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
-	if (!cluster) {
-		__WARN_printf("Cluster allocation failed. \
-				Possible bad scheduling\n");
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&cluster->list);
-	cluster->max_power_cost		=	1;
-	cluster->min_power_cost		=	1;
-	cluster->capacity		=	1024;
-	cluster->max_possible_capacity	=	1024;
-	cluster->efficiency		=	1;
-	cluster->load_scale_factor	=	1024;
-	cluster->cur_freq		=	1;
-	cluster->max_freq		=	1;
-	cluster->max_mitigated_freq	=	UINT_MAX;
-	cluster->min_freq		=	1;
-	cluster->max_possible_freq	=	1;
-	cluster->dstate			=	0;
-	cluster->dstate_wakeup_energy	=	0;
-	cluster->dstate_wakeup_latency	=	0;
-	cluster->freq_init_done		=	false;
-
-	raw_spin_lock_init(&cluster->load_lock);
-	cluster->cpus = *cpus;
-	cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
-
-	if (cluster->efficiency > max_possible_efficiency)
-		max_possible_efficiency = cluster->efficiency;
-	if (cluster->efficiency < min_possible_efficiency)
-		min_possible_efficiency = cluster->efficiency;
-
-	cluster->notifier_sent = 0;
-	return cluster;
-}
-
-static void add_cluster(const struct cpumask *cpus, struct list_head *head)
-{
-	struct sched_cluster *cluster = alloc_new_cluster(cpus);
-	int i;
-
-	if (!cluster)
-		return;
-
-	for_each_cpu(i, cpus)
-		cpu_rq(i)->cluster = cluster;
-
-	insert_cluster(cluster, head);
-	set_bit(num_clusters, all_cluster_ids);
-	num_clusters++;
-}
-
-void update_cluster_topology(void)
-{
-	struct cpumask cpus = *cpu_possible_mask;
-	const struct cpumask *cluster_cpus;
-	struct list_head new_head;
-	int i;
-
-	INIT_LIST_HEAD(&new_head);
-
-	for_each_cpu(i, &cpus) {
-		cluster_cpus = cpu_coregroup_mask(i);
-		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
-		cpumask_andnot(&cpus, &cpus, cluster_cpus);
-		add_cluster(cluster_cpus, &new_head);
-	}
-
-	assign_cluster_ids(&new_head);
-
-	/*
-	 * Ensure cluster ids are visible to all CPUs before making
-	 * cluster_head visible.
-	 */
-	move_list(&cluster_head, &new_head, false);
-	update_all_clusters_stats();
-}
-
-void init_clusters(void)
-{
-	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
-	init_cluster.cpus = *cpu_possible_mask;
-	raw_spin_lock_init(&init_cluster.load_lock);
-	INIT_LIST_HEAD(&cluster_head);
-}
-
-int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
-	mutex_lock(&cluster_lock);
-	if (!cb->get_cpu_cycle_counter) {
-		mutex_unlock(&cluster_lock);
-		return -EINVAL;
-	}
-
-	cpu_cycle_counter_cb = *cb;
-	use_cycle_counter = true;
-	mutex_unlock(&cluster_lock);
-
-	return 0;
-}
-
-/* Clear any HMP scheduler related requests pending from or on cpu */
-void clear_hmp_request(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long flags;
-
-	clear_boost_kick(cpu);
-	clear_reserved(cpu);
-	if (rq->push_task) {
-		struct task_struct *push_task = NULL;
-
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->push_task) {
-			clear_reserved(rq->push_cpu);
-			push_task = rq->push_task;
-			rq->push_task = NULL;
-		}
-		rq->active_balance = 0;
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		if (push_task)
-			put_task_struct(push_task);
-	}
-}
-
 int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -684,49 +226,12 @@
 }
 
 /*
- * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
- * associated with them. This is required for atomic update of those variables
- * when being modifed via sysctl interface.
- *
- * IMPORTANT: Initialize both copies to same value!!
- */
-
-/*
  * Tasks that are runnable continuously for a period greather than
  * EARLY_DETECTION_DURATION can be flagged early as potential
  * high load tasks.
  */
 #define EARLY_DETECTION_DURATION 9500000
 
-static __read_mostly unsigned int sched_ravg_hist_size = 5;
-__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
-
-static __read_mostly unsigned int sched_window_stats_policy =
-	 WINDOW_STATS_MAX_RECENT_AVG;
-__read_mostly unsigned int sysctl_sched_window_stats_policy =
-	WINDOW_STATS_MAX_RECENT_AVG;
-
-#define SCHED_ACCOUNT_WAIT_TIME 1
-
-__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
-
-/*
- * Enable colocation and frequency aggregation for all threads in a process.
- * The children inherits the group id from the parent.
- */
-unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
-
-
-#define SCHED_NEW_TASK_WINDOWS 5
-
-#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
-
-/*
- * This governs what load needs to be used when reporting CPU busy time
- * to the cpufreq governor.
- */
-__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
-
 /*
  * For increase, send notification if
  *      freq_required - cur_freq > sysctl_sched_freq_inc_notify
@@ -738,129 +243,20 @@
  *      cur_freq - freq_required > sysctl_sched_freq_dec_notify
  */
 __read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
-
-static __read_mostly unsigned int sched_io_is_busy;
-
 __read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
 
-/*
- * Maximum possible frequency across all cpus. Task demand and cpu
- * capacity (cpu_power) metrics are scaled in reference to it.
- */
-unsigned int max_possible_freq = 1;
-
-/*
- * Minimum possible max_freq across all cpus. This will be same as
- * max_possible_freq on homogeneous systems and could be different from
- * max_possible_freq on heterogenous systems. min_max_freq is used to derive
- * capacity (cpu_power) of cpus.
- */
-unsigned int min_max_freq = 1;
-
-unsigned int max_capacity = 1024; /* max(rq->capacity) */
-unsigned int min_capacity = 1024; /* min(rq->capacity) */
-unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
-unsigned int
-min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
-
-/* Min window size (in ns) = 10ms */
-#define MIN_SCHED_RAVG_WINDOW 10000000
-
-/* Max window size (in ns) = 1s */
-#define MAX_SCHED_RAVG_WINDOW 1000000000
-
-/* Window size (in ns) */
-__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
-
 /* Maximum allowed threshold before freq aggregation must be enabled */
 #define MAX_FREQ_AGGR_THRESH 1000
 
-/* Temporarily disable window-stats activity on all cpus */
-unsigned int __read_mostly sched_disable_window_stats;
-
-struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
-static LIST_HEAD(active_related_thread_groups);
-static DEFINE_RWLOCK(related_thread_group_lock);
-
 #define for_each_related_thread_group(grp) \
 	list_for_each_entry(grp, &active_related_thread_groups, list)
 
-/*
- * Task load is categorized into buckets for the purpose of top task tracking.
- * The entire range of load from 0 to sched_ravg_window needs to be covered
- * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
- * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
- * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
- * sched_load_granule.
- */
-__read_mostly unsigned int sched_load_granule =
-			MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
-
 /* Size of bitmaps maintained to track top tasks */
 static const unsigned int top_tasks_bitmap_size =
 		BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
 
-/*
- * Demand aggregation for frequency purpose:
- *
- * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
- * for frequency determination purpose. This aggregation is done per-cluster.
- *
- * CPU demand of tasks from various related groups is aggregated per-cluster and
- * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
- * by just rq->prev_runnable_sum.
- *
- * Some examples follow, which assume:
- *	Cluster0 = CPU0-3, Cluster1 = CPU4-7
- *	One related thread group A that has tasks A0, A1, A2
- *
- *	A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
- *	tasks belonging to group A are accumulated when they run on cpu X.
- *
- *	CX->curr/prev_sum = counters in which cpu execution stats of all tasks
- *	not belonging to group A are accumulated when they run on cpu X
- *
- * Let's say the stats for window M were as below:
- *
- *	C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
- *		Task A0 ran 5ms on CPU0
- *		Task B0 ran 1ms on CPU0
- *
- *	C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
- *		Task A1 ran 4ms on CPU1
- *		Task A2 ran 2ms on CPU1
- *		Task B1 ran 5ms on CPU1
- *
- *	C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
- *		CPU2 idle
- *
- *	C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
- *		CPU3 idle
- *
- * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
- * from all group A tasks is added to CPU1. IOW, at the end of window M, the cpu
- * busy time reported to the governor will be:
- *
- *
- *	C0 busy time = 1ms
- *	C1 busy time = 5 + 5 + 6 = 16ms
- *
- */
-static __read_mostly unsigned int sched_freq_aggregate = 1;
 __read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
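To make the window-M example above concrete, here is a minimal userspace sketch of the per-cluster aggregation; the array values simply mirror the numbers in the comment, the names are hypothetical, and this is not the kernel implementation:

#include <stdio.h>

int main(void)
{
	/* Per-CPU busy time of tasks outside group A (CX->prev_sum), in ms */
	unsigned int cpu_prev_sum[4] = { 1, 5, 0, 0 };
	/* Per-CPU busy time of group A tasks (A->cpu_time[X].prev_sum), in ms */
	unsigned int grp_prev_sum[4] = { 5, 6, 0, 0 };
	unsigned int grp_total = 0, max_busy_cpu = 0;
	int i;

	for (i = 0; i < 4; i++) {
		grp_total += grp_prev_sum[i];
		if (cpu_prev_sum[i] > cpu_prev_sum[max_busy_cpu])
			max_busy_cpu = i;
	}

	/* Group demand is reported on the max_busy_cpu of the cluster. */
	for (i = 0; i < 4; i++) {
		unsigned int busy = cpu_prev_sum[i];

		if (i == max_busy_cpu)
			busy += grp_total;
		printf("C%d busy time = %ums\n", i, busy);
	}
	return 0;
}

Running it reproduces the comment's result: C0 = 1ms, C1 = 5 + 5 + 6 = 16ms.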
 
-unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
-static unsigned int __read_mostly sched_freq_aggregate_threshold;
-
-/* Initial task load. Newly created tasks are assigned this load. */
-unsigned int __read_mostly sched_init_task_load_windows;
-unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
-
-unsigned int max_task_load(void)
-{
-	return sched_ravg_window;
-}
-
 /* A cpu can no longer accommodate more tasks if:
  *
  *	rq->nr_running > sysctl_sched_spill_nr_run ||
@@ -912,21 +308,6 @@
 unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
 
 /*
- * Task groups whose aggregate demand on a cpu is more than
- * sched_group_upmigrate need to be up-migrated if possible.
- */
-unsigned int __read_mostly sched_group_upmigrate;
-unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100;
-
-/*
- * Task groups, once up-migrated, will need to drop their aggregate
- * demand to less than sched_group_downmigrate before they are "down"
- * migrated.
- */
-unsigned int __read_mostly sched_group_downmigrate;
-unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
-
-/*
  * The load scale factor of a CPU gets boosted when its max frequency
  * is restricted due to which the tasks are migrating to higher capacity
  * CPUs early. The sched_upmigrate threshold is auto-upgraded by
@@ -1027,21 +408,6 @@
 		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
 }
 
-u32 sched_get_init_task_load(struct task_struct *p)
-{
-	return p->init_load_pct;
-}
-
-int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
-{
-	if (init_load_pct < 0 || init_load_pct > 100)
-		return -EINVAL;
-
-	p->init_load_pct = init_load_pct;
-
-	return 0;
-}
-
 #ifdef CONFIG_CGROUP_SCHED
 
 int upmigrate_discouraged(struct task_struct *p)
@@ -1129,37 +495,6 @@
 	return task_load_will_fit(p, tload, cpu, sched_boost_policy());
 }
 
-static int
-group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp,
-						u64 demand, bool group_boost)
-{
-	int cpu = cluster_first_cpu(cluster);
-	int prev_capacity = 0;
-	unsigned int threshold = sched_group_upmigrate;
-	u64 load;
-
-	if (cluster->capacity == max_capacity)
-		return 1;
-
-	if (group_boost)
-		return 0;
-
-	if (!demand)
-		return 1;
-
-	if (grp->preferred_cluster)
-		prev_capacity = grp->preferred_cluster->capacity;
-
-	if (cluster->capacity < prev_capacity)
-		threshold = sched_group_downmigrate;
-
-	load = scale_load_to_cpu(demand, cpu);
-	if (load < threshold)
-		return 1;
-
-	return 0;
-}
-
 /*
  * Return the cost of running task p on CPU cpu. This function
  * currently assumes that task p is the only task which will run on
@@ -1232,64 +567,6 @@
 
 }
 
-void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	if (is_big_task(p))
-		stats->nr_big_tasks++;
-}
-
-void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	if (is_big_task(p))
-		stats->nr_big_tasks--;
-
-	BUG_ON(stats->nr_big_tasks < 0);
-}
-
-void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
-{
-	inc_nr_big_task(&rq->hmp_stats, p);
-	if (change_cra)
-		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
-{
-	dec_nr_big_task(&rq->hmp_stats, p);
-	if (change_cra)
-		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
-{
-	stats->nr_big_tasks = 0;
-	if (reset_cra) {
-		stats->cumulative_runnable_avg = 0;
-		stats->pred_demands_sum = 0;
-	}
-}
-
-int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
-{
-	struct related_thread_group *grp;
-	int rc = 1;
-
-	rcu_read_lock();
-
-	grp = task_related_thread_group(p);
-	if (grp)
-		rc = (grp->preferred_cluster == cluster);
-
-	rcu_read_unlock();
-	return rc;
-}
-
 struct sched_cluster *rq_cluster(struct rq *rq)
 {
 	return rq->cluster;
@@ -1370,25 +647,6 @@
 	local_irq_enable();
 }
 
-DEFINE_MUTEX(policy_mutex);
-
-unsigned int update_freq_aggregate_threshold(unsigned int threshold)
-{
-	unsigned int old_threshold;
-
-	mutex_lock(&policy_mutex);
-
-	old_threshold = sysctl_sched_freq_aggregate_threshold_pct;
-
-	sysctl_sched_freq_aggregate_threshold_pct = threshold;
-	sched_freq_aggregate_threshold =
-		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
-
-	mutex_unlock(&policy_mutex);
-
-	return old_threshold;
-}
-
 static inline int invalid_value_freq_input(unsigned int *data)
 {
 	if (data == &sysctl_sched_freq_aggregate)
@@ -1539,46 +797,6 @@
 	p->ravg.prev_window_cpu = NULL;
 }
 
-void init_new_task_load(struct task_struct *p, bool idle_task)
-{
-	int i;
-	u32 init_load_windows = sched_init_task_load_windows;
-	u32 init_load_pct = current->init_load_pct;
-
-	p->init_load_pct = 0;
-	rcu_assign_pointer(p->grp, NULL);
-	INIT_LIST_HEAD(&p->grp_list);
-	memset(&p->ravg, 0, sizeof(struct ravg));
-	p->cpu_cycles = 0;
-	p->ravg.curr_burst = 0;
-	/*
-	 * Initialize the avg_burst to twice the threshold, so that
-	 * a task would not be classified as short burst right away
-	 * after fork. It takes at least 6 sleep-wakeup cycles for
-	 * the avg_burst to go below the threshold.
-	 */
-	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-	p->ravg.avg_sleep_time = 0;
-
-	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
-	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
-
-	/* Don't have much choice. CPU frequency would be bogus */
-	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
-
-	if (idle_task)
-		return;
-
-	if (init_load_pct)
-		init_load_windows = div64_u64((u64)init_load_pct *
-			  (u64)sched_ravg_window, 100);
-
-	p->ravg.demand = init_load_windows;
-	p->ravg.pred_demand = 0;
-	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
-		p->ravg.sum_history[i] = init_load_windows;
-}
-
 /* Return task demand in percentage scale */
 unsigned int pct_task_load(struct task_struct *p)
 {
@@ -1589,29 +807,6 @@
 	return load;
 }
 
-/*
- * Return total number of tasks "eligible" to run on highest capacity cpu
- *
- * This is simply nr_big_tasks for cpus which are not of max_capacity and
- * nr_running for cpus of max_capacity
- */
-unsigned int nr_eligible_big_tasks(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	int nr_big = rq->hmp_stats.nr_big_tasks;
-	int nr = rq->nr_running;
-
-	if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
-		return nr_big;
-
-	return nr;
-}
-
-static inline int exiting_task(struct task_struct *p)
-{
-	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
-}
-
 static int __init set_sched_ravg_window(char *str)
 {
 	unsigned int window_size;
@@ -1630,21 +825,6 @@
 
 early_param("sched_ravg_window", set_sched_ravg_window);
 
-static inline void
-update_window_start(struct rq *rq, u64 wallclock)
-{
-	s64 delta;
-	int nr_windows;
-
-	delta = wallclock - rq->window_start;
-	BUG_ON(delta < 0);
-	if (delta < sched_ravg_window)
-		return;
-
-	nr_windows = div64_u64(delta, sched_ravg_window);
-	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
-}
-
 #define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
 
 static inline u64 scale_exec_time(u64 delta, struct rq *rq)
@@ -1659,14 +839,6 @@
 	return delta;
 }
 
-static inline int cpu_is_waiting_on_io(struct rq *rq)
-{
-	if (!sched_io_is_busy)
-		return 0;
-
-	return atomic_read(&rq->nr_iowait);
-}
-
 /* Does freq_required sufficiently exceed or fall behind cur_freq? */
 static inline int
 nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
@@ -1712,7 +884,6 @@
 	}
 }
 
-static inline u64 freq_policy_load(struct rq *rq, u64 load);
 /*
  * Should scheduler alert governor for changing frequency?
  *
@@ -1814,44 +985,6 @@
 	}
 }
 
-static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
-				     u64 irqtime, int event)
-{
-	if (is_idle_task(p)) {
-		/* TASK_WAKE && TASK_MIGRATE are not possible on the idle task! */
-		if (event == PICK_NEXT_TASK)
-			return 0;
-
-		/* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
-		return irqtime || cpu_is_waiting_on_io(rq);
-	}
-
-	if (event == TASK_WAKE)
-		return 0;
-
-	if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
-		return 1;
-
-	/*
-	 * TASK_UPDATE can be called on a sleeping task, when it's moved between
-	 * related groups.
-	 */
-	if (event == TASK_UPDATE) {
-		if (rq->curr == p)
-			return 1;
-
-		return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
-	}
-
-	/* TASK_MIGRATE, PICK_NEXT_TASK left */
-	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
-}
-
-static inline bool is_new_task(struct task_struct *p)
-{
-	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
-}
-
 #define INC_STEP 8
 #define DEC_STEP 2
 #define CONSISTENT_THRES 16
@@ -1906,12 +1039,6 @@
 	return bidx;
 }
 
-static inline u64
-scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
-{
-	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
-}
-
 /*
  * get_pred_busy - calculate predicted demand for a task on runqueue
  *
@@ -2004,975 +1131,6 @@
 			     p->ravg.curr_window);
 }
 
-/*
- * predictive demand of a task is calculated at the window roll-over.
- * if the task current window busy time exceeds the predicted
- * demand, update it here to reflect the task needs.
- */
-void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
-{
-	u32 new, old;
-
-	if (is_idle_task(p) || exiting_task(p))
-		return;
-
-	if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
-			(!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
-			 (event != TASK_MIGRATE &&
-			 event != PICK_NEXT_TASK)))
-		return;
-
-	/*
-	 * TASK_UPDATE can be called on a sleeping task, when it's moved between
-	 * related groups.
-	 */
-	if (event == TASK_UPDATE) {
-		if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
-			return;
-	}
-
-	new = calc_pred_demand(rq, p);
-	old = p->ravg.pred_demand;
-
-	if (old >= new)
-		return;
-
-	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
-				!p->dl.dl_throttled))
-		p->sched_class->fixup_hmp_sched_stats(rq, p,
-				p->ravg.demand,
-				new);
-
-	p->ravg.pred_demand = new;
-}
-
-void clear_top_tasks_bitmap(unsigned long *bitmap)
-{
-	memset(bitmap, 0, top_tasks_bitmap_size);
-	__set_bit(NUM_LOAD_INDICES, bitmap);
-}
-
-/*
- * Special case the last index and provide a fast path for index = 0.
- * Note that sched_load_granule can change underneath us if we are not
- * holding any runqueue locks while calling the two functions below.
- */
-static u32 top_task_load(struct rq *rq)
-{
-	int index = rq->prev_top;
-	u8 prev = 1 - rq->curr_table;
-
-	if (!index) {
-		int msb = NUM_LOAD_INDICES - 1;
-
-		if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
-			return 0;
-		else
-			return sched_load_granule;
-	} else if (index == NUM_LOAD_INDICES - 1) {
-		return sched_ravg_window;
-	} else {
-		return (index + 1) * sched_load_granule;
-	}
-}
-
-static int load_to_index(u32 load)
-{
-	if (load < sched_load_granule)
-		return 0;
-	else if (load >= sched_ravg_window)
-		return NUM_LOAD_INDICES - 1;
-	else
-		return load / sched_load_granule;
-}
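A hedged usage example of the index mapping above, as a standalone sketch; the window size, granule, and bucket count are assumptions mirroring the defaults earlier in this file, not values verified from the scheduler headers:

#include <stdio.h>

#define SCHED_RAVG_WINDOW 10000000u	/* assumed 10ms window, in ns */
#define NUM_LOAD_INDICES 1000u		/* assumed bucket count */
#define LOAD_GRANULE (SCHED_RAVG_WINDOW / NUM_LOAD_INDICES)

static unsigned int load_to_index(unsigned int load)
{
	if (load < LOAD_GRANULE)
		return 0;
	if (load >= SCHED_RAVG_WINDOW)
		return NUM_LOAD_INDICES - 1;
	return load / LOAD_GRANULE;
}

int main(void)
{
	/* A tiny load maps to bucket 0, 2.5ms lands in bucket 250,
	 * and a full window saturates at the last index. */
	printf("%u %u %u\n", load_to_index(5000),
	       load_to_index(2500000), load_to_index(SCHED_RAVG_WINDOW));
	return 0;
}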
-
-static void update_top_tasks(struct task_struct *p, struct rq *rq,
-		u32 old_curr_window, int new_window, bool full_window)
-{
-	u8 curr = rq->curr_table;
-	u8 prev = 1 - curr;
-	u8 *curr_table = rq->top_tasks[curr];
-	u8 *prev_table = rq->top_tasks[prev];
-	int old_index, new_index, update_index;
-	u32 curr_window = p->ravg.curr_window;
-	u32 prev_window = p->ravg.prev_window;
-	bool zero_index_update;
-
-	if (old_curr_window == curr_window && !new_window)
-		return;
-
-	old_index = load_to_index(old_curr_window);
-	new_index = load_to_index(curr_window);
-
-	if (!new_window) {
-		zero_index_update = !old_curr_window && curr_window;
-		if (old_index != new_index || zero_index_update) {
-			if (old_curr_window)
-				curr_table[old_index] -= 1;
-			if (curr_window)
-				curr_table[new_index] += 1;
-			if (new_index > rq->curr_top)
-				rq->curr_top = new_index;
-		}
-
-		if (!curr_table[old_index])
-			__clear_bit(NUM_LOAD_INDICES - old_index - 1,
-				rq->top_tasks_bitmap[curr]);
-
-		if (curr_table[new_index] == 1)
-			__set_bit(NUM_LOAD_INDICES - new_index - 1,
-				rq->top_tasks_bitmap[curr]);
-
-		return;
-	}
-
-	/*
-	 * The window has rolled over for this task. By the time we get
-	 * here, curr/prev swaps would have already occurred. So we need
-	 * to use prev_window for the new index.
-	 */
-	update_index = load_to_index(prev_window);
-
-	if (full_window) {
-		/*
-		 * Two cases here. Either 'p' ran for the entire window or
-		 * it didn't run at all. In either case there is no entry
-		 * in the prev table. If 'p' ran the entire window, we just
-		 * need to create a new entry in the prev table. In this case
-		 * update_index will correspond to sched_ravg_window
-		 * so we can unconditionally update the top index.
-		 */
-		if (prev_window) {
-			prev_table[update_index] += 1;
-			rq->prev_top = update_index;
-		}
-
-		if (prev_table[update_index] == 1)
-			__set_bit(NUM_LOAD_INDICES - update_index - 1,
-				rq->top_tasks_bitmap[prev]);
-	} else {
-		zero_index_update = !old_curr_window && prev_window;
-		if (old_index != update_index || zero_index_update) {
-			if (old_curr_window)
-				prev_table[old_index] -= 1;
-
-			prev_table[update_index] += 1;
-
-			if (update_index > rq->prev_top)
-				rq->prev_top = update_index;
-
-			if (!prev_table[old_index])
-				__clear_bit(NUM_LOAD_INDICES - old_index - 1,
-						rq->top_tasks_bitmap[prev]);
-
-			if (prev_table[update_index] == 1)
-				__set_bit(NUM_LOAD_INDICES - update_index - 1,
-						rq->top_tasks_bitmap[prev]);
-		}
-	}
-
-	if (curr_window) {
-		curr_table[new_index] += 1;
-
-		if (new_index > rq->curr_top)
-			rq->curr_top = new_index;
-
-		if (curr_table[new_index] == 1)
-			__set_bit(NUM_LOAD_INDICES - new_index - 1,
-				rq->top_tasks_bitmap[curr]);
-	}
-}
-
-static inline void clear_top_tasks_table(u8 *table)
-{
-	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
-}
-
-static void rollover_top_tasks(struct rq *rq, bool full_window)
-{
-	u8 curr_table = rq->curr_table;
-	u8 prev_table = 1 - curr_table;
-	int curr_top = rq->curr_top;
-
-	clear_top_tasks_table(rq->top_tasks[prev_table]);
-	clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
-
-	if (full_window) {
-		curr_top = 0;
-		clear_top_tasks_table(rq->top_tasks[curr_table]);
-		clear_top_tasks_bitmap(
-				rq->top_tasks_bitmap[curr_table]);
-	}
-
-	rq->curr_table = prev_table;
-	rq->prev_top = curr_top;
-	rq->curr_top = 0;
-}
-
-static u32 empty_windows[NR_CPUS];
-
-static void rollover_task_window(struct task_struct *p, bool full_window)
-{
-	u32 *curr_cpu_windows = empty_windows;
-	u32 curr_window;
-	int i;
-
-	/* Rollover the sum */
-	curr_window = 0;
-
-	if (!full_window) {
-		curr_window = p->ravg.curr_window;
-		curr_cpu_windows = p->ravg.curr_window_cpu;
-	}
-
-	p->ravg.prev_window = curr_window;
-	p->ravg.curr_window = 0;
-
-	/* Roll over individual CPU contributions */
-	for (i = 0; i < nr_cpu_ids; i++) {
-		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
-		p->ravg.curr_window_cpu[i] = 0;
-	}
-}
-
-static void rollover_cpu_window(struct rq *rq, bool full_window)
-{
-	u64 curr_sum = rq->curr_runnable_sum;
-	u64 nt_curr_sum = rq->nt_curr_runnable_sum;
-	u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
-	u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
-
-	if (unlikely(full_window)) {
-		curr_sum = 0;
-		nt_curr_sum = 0;
-		grp_curr_sum = 0;
-		grp_nt_curr_sum = 0;
-	}
-
-	rq->prev_runnable_sum = curr_sum;
-	rq->nt_prev_runnable_sum = nt_curr_sum;
-	rq->grp_time.prev_runnable_sum = grp_curr_sum;
-	rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
-
-	rq->curr_runnable_sum = 0;
-	rq->nt_curr_runnable_sum = 0;
-	rq->grp_time.curr_runnable_sum = 0;
-	rq->grp_time.nt_curr_runnable_sum = 0;
-}
-
-/*
- * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
- */
-static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
-				 int event, u64 wallclock, u64 irqtime)
-{
-	int new_window, full_window = 0;
-	int p_is_curr_task = (p == rq->curr);
-	u64 mark_start = p->ravg.mark_start;
-	u64 window_start = rq->window_start;
-	u32 window_size = sched_ravg_window;
-	u64 delta;
-	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
-	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
-	u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
-	u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
-	bool new_task;
-	struct related_thread_group *grp;
-	int cpu = rq->cpu;
-	u32 old_curr_window = p->ravg.curr_window;
-
-	new_window = mark_start < window_start;
-	if (new_window) {
-		full_window = (window_start - mark_start) >= window_size;
-		if (p->ravg.active_windows < USHRT_MAX)
-			p->ravg.active_windows++;
-	}
-
-	new_task = is_new_task(p);
-
-	/*
-	 * Handle per-task window rollover. We don't care about the idle
-	 * task or exiting tasks.
-	 */
-	if (!is_idle_task(p) && !exiting_task(p)) {
-		if (new_window)
-			rollover_task_window(p, full_window);
-	}
-
-	if (p_is_curr_task && new_window) {
-		rollover_cpu_window(rq, full_window);
-		rollover_top_tasks(rq, full_window);
-	}
-
-	if (!account_busy_for_cpu_time(rq, p, irqtime, event))
-		goto done;
-
-	grp = p->grp;
-	if (grp && sched_freq_aggregate) {
-		struct group_cpu_time *cpu_time = &rq->grp_time;
-
-		curr_runnable_sum = &cpu_time->curr_runnable_sum;
-		prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
-		nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
-		nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-	}
-
-	if (!new_window) {
-		/*
-		 * account_busy_for_cpu_time() = 1 so busy time needs
-		 * to be accounted to the current window. No rollover
-		 * since we didn't start a new window. An example of this is
-		 * when a task starts execution and then sleeps within the
-		 * same window.
-		 */
-
-		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
-			delta = wallclock - mark_start;
-		else
-			delta = irqtime;
-		delta = scale_exec_time(delta, rq);
-		*curr_runnable_sum += delta;
-		if (new_task)
-			*nt_curr_runnable_sum += delta;
-
-		if (!is_idle_task(p) && !exiting_task(p)) {
-			p->ravg.curr_window += delta;
-			p->ravg.curr_window_cpu[cpu] += delta;
-		}
-
-		goto done;
-	}
-
-	if (!p_is_curr_task) {
-		/*
-		 * account_busy_for_cpu_time() = 1 so busy time needs
-		 * to be accounted to the current window. A new window
-		 * has also started, but p is not the current task, so the
-		 * window is not rolled over - just split up and account
-		 * as necessary into curr and prev. The window is only
-		 * rolled over when a new window is processed for the current
-		 * task.
-		 *
-		 * Irqtime can't be accounted by a task that isn't the
-		 * currently running task.
-		 */
-
-		if (!full_window) {
-			/*
-			 * A full window hasn't elapsed, account partial
-			 * contribution to previous completed window.
-			 */
-			delta = scale_exec_time(window_start - mark_start, rq);
-			if (!exiting_task(p)) {
-				p->ravg.prev_window += delta;
-				p->ravg.prev_window_cpu[cpu] += delta;
-			}
-		} else {
-			/*
-			 * Since at least one full window has elapsed,
-			 * the contribution to the previous window is the
-			 * full window (window_size).
-			 */
-			delta = scale_exec_time(window_size, rq);
-			if (!exiting_task(p)) {
-				p->ravg.prev_window = delta;
-				p->ravg.prev_window_cpu[cpu] = delta;
-			}
-		}
-
-		*prev_runnable_sum += delta;
-		if (new_task)
-			*nt_prev_runnable_sum += delta;
-
-		/* Account piece of busy time in the current window. */
-		delta = scale_exec_time(wallclock - window_start, rq);
-		*curr_runnable_sum += delta;
-		if (new_task)
-			*nt_curr_runnable_sum += delta;
-
-		if (!exiting_task(p)) {
-			p->ravg.curr_window = delta;
-			p->ravg.curr_window_cpu[cpu] = delta;
-		}
-
-		goto done;
-	}
-
-	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
-		/*
-		 * account_busy_for_cpu_time() = 1 so busy time needs
-		 * to be accounted to the current window. A new window
-		 * has started and p is the current task so rollover is
-		 * needed. If any of these three above conditions are true
-		 * then this busy time can't be accounted as irqtime.
-		 *
-		 * Busy time for the idle task or exiting tasks need not
-		 * be accounted.
-		 *
-		 * An example of this would be a task that starts execution
-		 * and then sleeps once a new window has begun.
-		 */
-
-		if (!full_window) {
-			/*
-			 * A full window hasn't elapsed, account partial
-			 * contribution to previous completed window.
-			 */
-			delta = scale_exec_time(window_start - mark_start, rq);
-			if (!is_idle_task(p) && !exiting_task(p)) {
-				p->ravg.prev_window += delta;
-				p->ravg.prev_window_cpu[cpu] += delta;
-			}
-		} else {
-			/*
-			 * Since at least one full window has elapsed,
-			 * the contribution to the previous window is the
-			 * full window (window_size).
-			 */
-			delta = scale_exec_time(window_size, rq);
-			if (!is_idle_task(p) && !exiting_task(p)) {
-				p->ravg.prev_window = delta;
-				p->ravg.prev_window_cpu[cpu] = delta;
-			}
-		}
-
-		/*
-		 * Rollover is done here by overwriting the values in
-		 * prev_runnable_sum and curr_runnable_sum.
-		 */
-		*prev_runnable_sum += delta;
-		if (new_task)
-			*nt_prev_runnable_sum += delta;
-
-		/* Account piece of busy time in the current window. */
-		delta = scale_exec_time(wallclock - window_start, rq);
-		*curr_runnable_sum += delta;
-		if (new_task)
-			*nt_curr_runnable_sum += delta;
-
-		if (!is_idle_task(p) && !exiting_task(p)) {
-			p->ravg.curr_window = delta;
-			p->ravg.curr_window_cpu[cpu] = delta;
-		}
-
-		goto done;
-	}
-
-	if (irqtime) {
-		/*
-		 * account_busy_for_cpu_time() = 1 so busy time needs
-		 * to be accounted to the current window. A new window
-		 * has started and p is the current task so rollover is
-		 * needed. The current task must be the idle task because
-		 * irqtime is not accounted for any other task.
-		 *
-		 * Irqtime will be accounted each time we process IRQ activity
-		 * after a period of idleness, so we know the IRQ busy time
-		 * started at wallclock - irqtime.
-		 */
-
-		BUG_ON(!is_idle_task(p));
-		mark_start = wallclock - irqtime;
-
-		/*
-		 * Roll window over. If IRQ busy time was just in the current
-		 * window then that is all that need be accounted.
-		 */
-		if (mark_start > window_start) {
-			*curr_runnable_sum = scale_exec_time(irqtime, rq);
-			return;
-		}
-
-		/*
-		 * The IRQ busy time spanned multiple windows. Process the
-		 * busy time preceding the current window start first.
-		 */
-		delta = window_start - mark_start;
-		if (delta > window_size)
-			delta = window_size;
-		delta = scale_exec_time(delta, rq);
-		*prev_runnable_sum += delta;
-
-		/* Process the remaining IRQ busy time in the current window. */
-		delta = wallclock - window_start;
-		rq->curr_runnable_sum = scale_exec_time(delta, rq);
-
-		return;
-	}
-
-done:
-	if (!is_idle_task(p) && !exiting_task(p))
-		update_top_tasks(p, rq, old_curr_window,
-					new_window, full_window);
-}
-
-static inline u32 predict_and_update_buckets(struct rq *rq,
-			struct task_struct *p, u32 runtime) {
-
-	int bidx;
-	u32 pred_demand;
-
-	bidx = busy_to_bucket(runtime);
-	pred_demand = get_pred_busy(rq, p, bidx, runtime);
-	bucket_increase(p->ravg.busy_buckets, bidx);
-
-	return pred_demand;
-}
-
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
-{
-	if (use_cycle_counter)
-		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
-}
-
-static void
-update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
-			  u64 wallclock, u64 irqtime)
-{
-	u64 cur_cycles;
-	int cpu = cpu_of(rq);
-
-	lockdep_assert_held(&rq->lock);
-
-	if (!use_cycle_counter) {
-		rq->cc.cycles = cpu_cur_freq(cpu);
-		rq->cc.time = 1;
-		return;
-	}
-
-	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
-
-	/*
-	 * If the current task is the idle task and irqtime == 0, the CPU
-	 * was indeed idle and its cycle counter was probably not
-	 * increasing.  We still need an estimated CPU frequency
-	 * for IO wait time accounting.  Use the previously
-	 * calculated frequency in such a case.
-	 */
-	if (!is_idle_task(rq->curr) || irqtime) {
-		if (unlikely(cur_cycles < p->cpu_cycles))
-			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
-		else
-			rq->cc.cycles = cur_cycles - p->cpu_cycles;
-		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
-
-		if (event == IRQ_UPDATE && is_idle_task(p))
-			/*
-			 * Time between mark_start of idle task and IRQ handler
-			 * entry time is CPU cycle counter stall period.
-			 * Upon IRQ handler entry sched_account_irqstart()
-			 * replenishes idle task's cpu cycle counter so
-			 * rq->cc.cycles now represents increased cycles during
-			 * IRQ handler rather than time between idle entry and
-			 * IRQ exit.  Thus use irqtime as time delta.
-			 */
-			rq->cc.time = irqtime;
-		else
-			rq->cc.time = wallclock - p->ravg.mark_start;
-		BUG_ON((s64)rq->cc.time < 0);
-	}
-
-	p->cpu_cycles = cur_cycles;
-
-	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
-}
-
-static int
-account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
-{
-	/*
-	 * No need to bother updating task demand for exiting tasks
-	 * or the idle task.
-	 */
-	if (exiting_task(p) || is_idle_task(p))
-		return 0;
-
-	/*
-	 * When a task is waking up it is completing a segment of non-busy
-	 * time. Likewise, if wait time is not treated as busy time, then
-	 * when a task begins to run or is migrated, it is not running and
-	 * is completing a segment of non-busy time.
-	 */
-	if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
-			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
-		return 0;
-
-	/*
-	 * TASK_UPDATE can be called on a sleeping task, when it's moved between
-	 * related groups.
-	 */
-	if (event == TASK_UPDATE) {
-		if (rq->curr == p)
-			return 1;
-
-		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
-	}
-
-	return 1;
-}
-
-/*
- * Called when a new window is starting for a task, to record cpu usage over
- * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
- * when, say, a real-time task runs without preemption for several windows at a
- * stretch.
- */
-static void update_history(struct rq *rq, struct task_struct *p,
-			 u32 runtime, int samples, int event)
-{
-	u32 *hist = &p->ravg.sum_history[0];
-	int ridx, widx;
-	u32 max = 0, avg, demand, pred_demand;
-	u64 sum = 0;
-
-	/* Ignore windows where task had no activity */
-	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
-		goto done;
-
-	/* Push new 'runtime' value onto stack */
-	widx = sched_ravg_hist_size - 1;
-	ridx = widx - samples;
-	for (; ridx >= 0; --widx, --ridx) {
-		hist[widx] = hist[ridx];
-		sum += hist[widx];
-		if (hist[widx] > max)
-			max = hist[widx];
-	}
-
-	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
-		hist[widx] = runtime;
-		sum += hist[widx];
-		if (hist[widx] > max)
-			max = hist[widx];
-	}
-
-	p->ravg.sum = 0;
-
-	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
-		demand = runtime;
-	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
-		demand = max;
-	} else {
-		avg = div64_u64(sum, sched_ravg_hist_size);
-		if (sched_window_stats_policy == WINDOW_STATS_AVG)
-			demand = avg;
-		else
-			demand = max(avg, runtime);
-	}
-	pred_demand = predict_and_update_buckets(rq, p, runtime);
-
-	/*
-	 * A throttled deadline sched class task gets dequeued without
-	 * changing p->on_rq. Since the dequeue decrements hmp stats,
-	 * avoid decrementing them here again.
-	 */
-	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
-						!p->dl.dl_throttled))
-		p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
-						      pred_demand);
-
-	p->ravg.demand = demand;
-	p->ravg.pred_demand = pred_demand;
-
-done:
-	trace_sched_update_history(rq, p, runtime, samples, event);
-}
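The demand selection step above can be illustrated with made-up history values; this standalone sketch shows what each window-stats policy would pick (the policy names follow the code, the numbers are invented):

#include <stdio.h>

int main(void)
{
	unsigned int hist[5] = { 6, 2, 9, 4, 3 };	/* ms; hist[0] is most recent */
	unsigned int runtime = hist[0], max = 0, avg, i;
	unsigned long long sum = 0;

	for (i = 0; i < 5; i++) {
		sum += hist[i];
		if (hist[i] > max)
			max = hist[i];
	}
	avg = sum / 5;

	/* RECENT picks the latest window, MAX the largest, AVG the mean,
	 * and the remaining policy takes max(avg, runtime). */
	printf("RECENT=%u MAX=%u AVG=%u MAX_AVG=%u\n",
	       runtime, max, avg, avg > runtime ? avg : runtime);
	return 0;
}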
-
-static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
-{
-	delta = scale_exec_time(delta, rq);
-	p->ravg.sum += delta;
-	if (unlikely(p->ravg.sum > sched_ravg_window))
-		p->ravg.sum = sched_ravg_window;
-
-	return delta;
-}
-
-/*
- * Account cpu demand of task and/or update task's cpu demand history
- *
- * ms = p->ravg.mark_start;
- * wc = wallclock
- * ws = rq->window_start
- *
- * Three possibilities:
- *
- *	a) Task event is contained within one window.
- *		window_start < mark_start < wallclock
- *
- *		ws   ms  wc
- *		|    |   |
- *		V    V   V
- *		|---------------|
- *
- *	In this case, p->ravg.sum is updated *iff* event is appropriate
- *	(ex: event == PUT_PREV_TASK)
- *
- *	b) Task event spans two windows.
- *		mark_start < window_start < wallclock
- *
- *		ms   ws   wc
- *		|    |    |
- *		V    V    V
- *		-----|-------------------
- *
- *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
- *	is appropriate, then a new window sample is recorded followed
- *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
- *
- *	c) Task event spans more than two windows.
- *
- *		ms ws_tmp			   ws  wc
- *		|  |				   |   |
- *		V  V				   V   V
- *		---|-------|-------|-------|-------|------
- *		   |				   |
- *		   |<------ nr_full_windows ------>|
- *
- *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
- *	event is appropriate, window sample of p->ravg.sum is recorded,
- *	'nr_full_window' samples of window_size is also recorded *iff*
- *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
- *	*iff* event is appropriate.
- *
- * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
- * depends on it!
- */
-static u64 update_task_demand(struct task_struct *p, struct rq *rq,
-			       int event, u64 wallclock)
-{
-	u64 mark_start = p->ravg.mark_start;
-	u64 delta, window_start = rq->window_start;
-	int new_window, nr_full_windows;
-	u32 window_size = sched_ravg_window;
-	u64 runtime;
-
-	new_window = mark_start < window_start;
-	if (!account_busy_for_task_demand(rq, p, event)) {
-		if (new_window)
-			/*
-			 * If this time isn't being accounted as busy
-			 * time, and a new window has started, only the
-			 * previous window need be closed out with the
-			 * pre-existing demand. Multiple windows may have
-			 * elapsed, but since empty windows are dropped,
-			 * it is not necessary to account those.
-			 */
-			update_history(rq, p, p->ravg.sum, 1, event);
-		return 0;
-	}
-
-	if (!new_window) {
-		/*
-		 * The simple case - busy time contained within the existing
-		 * window.
-		 */
-		return add_to_task_demand(rq, p, wallclock - mark_start);
-	}
-
-	/*
-	 * Busy time spans at least two windows. Temporarily rewind
-	 * window_start to first window boundary after mark_start.
-	 */
-	delta = window_start - mark_start;
-	nr_full_windows = div64_u64(delta, window_size);
-	window_start -= (u64)nr_full_windows * (u64)window_size;
-
-	/* Process (window_start - mark_start) first */
-	runtime = add_to_task_demand(rq, p, window_start - mark_start);
-
-	/* Push new sample(s) into task's demand history */
-	update_history(rq, p, p->ravg.sum, 1, event);
-	if (nr_full_windows) {
-		u64 scaled_window = scale_exec_time(window_size, rq);
-
-		update_history(rq, p, scaled_window, nr_full_windows, event);
-		runtime += nr_full_windows * scaled_window;
-	}
-
-	/*
-	 * Roll window_start back to current to process any remainder
-	 * in current window.
-	 */
-	window_start += (u64)nr_full_windows * (u64)window_size;
-
-	/* Process (wallclock - window_start) next */
-	mark_start = window_start;
-	runtime += add_to_task_demand(rq, p, wallclock - mark_start);
-
-	return runtime;
-}
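A worked numeric instance of case (c) above, as a sketch only; a 10ms window is assumed and scale_exec_time() is treated as identity (no frequency scaling):

#include <stdio.h>

int main(void)
{
	unsigned long long window = 10000000ull;	/* assumed 10ms window */
	unsigned long long ms = 3000000ull;		/* mark_start: 3ms into window 0 */
	unsigned long long ws = 40000000ull;		/* current window_start */
	unsigned long long wc = 47000000ull;		/* wallclock: 7ms past ws */

	unsigned long long delta = ws - ms;			/* 37ms, spans > 2 windows */
	unsigned long long nr_full = delta / window;		/* 3 full windows */
	unsigned long long ws_tmp = ws - nr_full * window;	/* 10ms: first boundary after ms */
	unsigned long long head = ws_tmp - ms;			/* 7ms closes the first window */
	unsigned long long tail = wc - ws;			/* 7ms in the current window */

	/* head + 3 full windows + tail == wc - ms == 44ms of accounted demand */
	printf("head=%llu full=%llu tail=%llu total=%llu\n",
	       head, nr_full, tail, head + nr_full * window + tail);
	return 0;
}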
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
-{
-	/*
-	 * update_task_demand() has checks for the idle task and
-	 * exiting tasks. The runtime may include the wait time,
-	 * so update the burst only for the cases where the
-	 * task is running.
-	 */
-	if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
-				rq->curr == p))
-		p->ravg.curr_burst += runtime;
-}
-
-/* Reflect task activity on its demand and cpu's busy time statistics */
-void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
-						u64 wallclock, u64 irqtime)
-{
-	u64 runtime;
-
-	if (!rq->window_start || sched_disable_window_stats ||
-	    p->ravg.mark_start == wallclock)
-		return;
-
-	lockdep_assert_held(&rq->lock);
-
-	update_window_start(rq, wallclock);
-
-	if (!p->ravg.mark_start) {
-		update_task_cpu_cycles(p, cpu_of(rq));
-		goto done;
-	}
-
-	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
-	runtime = update_task_demand(p, rq, event, wallclock);
-	if (runtime)
-		update_task_burst(p, rq, event, runtime);
-	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
-	update_task_pred_demand(rq, p, event);
-done:
-	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
-				     rq->cc.cycles, rq->cc.time,
-				     p->grp ? &rq->grp_time : NULL);
-
-	p->ravg.mark_start = wallclock;
-}
-
-void sched_account_irqtime(int cpu, struct task_struct *curr,
-				 u64 delta, u64 wallclock)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long flags, nr_windows;
-	u64 cur_jiffies_ts;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-
-	/*
-	 * cputime (wallclock) uses sched_clock so use the same here for
-	 * consistency.
-	 */
-	delta += sched_clock() - wallclock;
-	cur_jiffies_ts = get_jiffies_64();
-
-	if (is_idle_task(curr))
-		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
-				 delta);
-
-	nr_windows = cur_jiffies_ts - rq->irqload_ts;
-
-	if (nr_windows) {
-		if (nr_windows < 10) {
-			/* Decay CPU's irqload by 3/4 for each window. */
-			rq->avg_irqload *= (3 * nr_windows);
-			rq->avg_irqload = div64_u64(rq->avg_irqload,
-						    4 * nr_windows);
-		} else {
-			rq->avg_irqload = 0;
-		}
-		rq->avg_irqload += rq->cur_irqload;
-		rq->cur_irqload = 0;
-	}
-
-	rq->cur_irqload += delta;
-	rq->irqload_ts = cur_jiffies_ts;
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
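For reference, the decay arithmetic above reduces to a single 3/4 factor whenever 1 to 9 windows have elapsed (the nr_windows terms cancel), and drops the history entirely at 10 or more; a minimal userspace sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long avg_irqload = 8000, cur_irqload = 1000;
	unsigned long long nr_windows = 2;	/* jiffies since last update */

	if (nr_windows) {
		if (nr_windows < 10) {
			avg_irqload *= 3 * nr_windows;
			avg_irqload /= 4 * nr_windows;	/* net effect: * 3/4 */
		} else {
			avg_irqload = 0;		/* stale history is dropped */
		}
		avg_irqload += cur_irqload;
		cur_irqload = 0;
	}

	printf("avg_irqload = %llu\n", avg_irqload);	/* 8000 * 3/4 + 1000 = 7000 */
	return 0;
}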
-
-void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (!rq->window_start || sched_disable_window_stats)
-		return;
-
-	if (is_idle_task(curr)) {
-		/* We're here without rq->lock held, IRQ disabled */
-		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu);
-		raw_spin_unlock(&rq->lock);
-	}
-}
-
-void reset_task_stats(struct task_struct *p)
-{
-	u32 sum = 0;
-	u32 *curr_window_ptr = NULL;
-	u32 *prev_window_ptr = NULL;
-
-	if (exiting_task(p)) {
-		sum = EXITING_TASK_MARKER;
-	} else {
-		curr_window_ptr =  p->ravg.curr_window_cpu;
-		prev_window_ptr = p->ravg.prev_window_cpu;
-		memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
-		memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
-	}
-
-	memset(&p->ravg, 0, sizeof(struct ravg));
-
-	p->ravg.curr_window_cpu = curr_window_ptr;
-	p->ravg.prev_window_cpu = prev_window_ptr;
-
-	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-
-	/* Retain EXITING_TASK marker */
-	p->ravg.sum_history[0] = sum;
-}
-
-void mark_task_starting(struct task_struct *p)
-{
-	u64 wallclock;
-	struct rq *rq = task_rq(p);
-
-	if (!rq->window_start || sched_disable_window_stats) {
-		reset_task_stats(p);
-		return;
-	}
-
-	wallclock = sched_ktime_clock();
-	p->ravg.mark_start = p->last_wake_ts = wallclock;
-	p->last_cpu_selected_ts = wallclock;
-	p->last_switch_out_ts = 0;
-	update_task_cpu_cycles(p, cpu_of(rq));
-}
-
-void set_window_start(struct rq *rq)
-{
-	static int sync_cpu_available;
-
-	if (rq->window_start)
-		return;
-
-	if (!sync_cpu_available) {
-		rq->window_start = sched_ktime_clock();
-		sync_cpu_available = 1;
-	} else {
-		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
-
-		raw_spin_unlock(&rq->lock);
-		double_rq_lock(rq, sync_rq);
-		rq->window_start = sync_rq->window_start;
-		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
-		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
-		raw_spin_unlock(&sync_rq->lock);
-	}
-
-	rq->curr->ravg.mark_start = rq->window_start;
-}
-
 static void reset_all_task_stats(void)
 {
 	struct task_struct *g, *p;
@@ -3078,59 +1236,6 @@
 		sched_ktime_clock() - start_ts, reason, old, new);
 }
 
-/*
- * In this function we match the accumulated subtractions with the current
- * and previous windows we are operating with. Ignore any entries where
- * the window start in the load_subtraction struct does not match either
- * the current or the previous window. This could happen whenever CPUs
- * become idle or busy with interrupts disabled for an extended period.
- */
-static inline void account_load_subtractions(struct rq *rq)
-{
-	u64 ws = rq->window_start;
-	u64 prev_ws = ws - sched_ravg_window;
-	struct load_subtractions *ls = rq->load_subs;
-	int i;
-
-	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
-		if (ls[i].window_start == ws) {
-			rq->curr_runnable_sum -= ls[i].subs;
-			rq->nt_curr_runnable_sum -= ls[i].new_subs;
-		} else if (ls[i].window_start == prev_ws) {
-			rq->prev_runnable_sum -= ls[i].subs;
-			rq->nt_prev_runnable_sum -= ls[i].new_subs;
-		}
-
-		ls[i].subs = 0;
-		ls[i].new_subs = 0;
-	}
-
-	BUG_ON((s64)rq->prev_runnable_sum < 0);
-	BUG_ON((s64)rq->curr_runnable_sum < 0);
-	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
-	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
-}
-
-static inline u64 freq_policy_load(struct rq *rq, u64 load)
-{
-	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
-
-	switch (reporting_policy) {
-	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
-		load = max_t(u64, load, top_task_load(rq));
-		break;
-	case FREQ_REPORT_TOP_TASK:
-		load = top_task_load(rq);
-		break;
-	case FREQ_REPORT_CPU_LOAD:
-		break;
-	default:
-		break;
-	}
-
-	return load;
-}
-
 void sched_get_cpus_busy(struct sched_load *busy,
 			 const struct cpumask *query_cpus)
 {
@@ -3296,11 +1401,6 @@
 	}
 }
 
-void sched_set_io_is_busy(int val)
-{
-	sched_io_is_busy = val;
-}
-
 int sched_set_window(u64 window_start, unsigned int window_size)
 {
 	u64 now, cur_jiffies, jiffy_ktime_ns;
@@ -3350,289 +1450,6 @@
 	rq->load_subs[index].new_subs = 0;
 }
 
-static bool get_subtraction_index(struct rq *rq, u64 ws)
-{
-	int i;
-	u64 oldest = ULLONG_MAX;
-	int oldest_index = 0;
-
-	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
-		u64 entry_ws = rq->load_subs[i].window_start;
-
-		if (ws == entry_ws)
-			return i;
-
-		if (entry_ws < oldest) {
-			oldest = entry_ws;
-			oldest_index = i;
-		}
-	}
-
-	create_subtraction_entry(rq, ws, oldest_index);
-	return oldest_index;
-}
-
-static void update_rq_load_subtractions(int index, struct rq *rq,
-					u32 sub_load, bool new_task)
-{
-	rq->load_subs[index].subs +=  sub_load;
-	if (new_task)
-		rq->load_subs[index].new_subs += sub_load;
-}
-
-static void update_cluster_load_subtractions(struct task_struct *p,
-					int cpu, u64 ws, bool new_task)
-{
-	struct sched_cluster *cluster = cpu_cluster(cpu);
-	struct cpumask cluster_cpus = cluster->cpus;
-	u64 prev_ws = ws - sched_ravg_window;
-	int i;
-
-	cpumask_clear_cpu(cpu, &cluster_cpus);
-	raw_spin_lock(&cluster->load_lock);
-
-	for_each_cpu(i, &cluster_cpus) {
-		struct rq *rq = cpu_rq(i);
-		int index;
-
-		if (p->ravg.curr_window_cpu[i]) {
-			index = get_subtraction_index(rq, ws);
-			update_rq_load_subtractions(index, rq,
-				p->ravg.curr_window_cpu[i], new_task);
-			p->ravg.curr_window_cpu[i] = 0;
-		}
-
-		if (p->ravg.prev_window_cpu[i]) {
-			index = get_subtraction_index(rq, prev_ws);
-			update_rq_load_subtractions(index, rq,
-				p->ravg.prev_window_cpu[i], new_task);
-			p->ravg.prev_window_cpu[i] = 0;
-		}
-	}
-
-	raw_spin_unlock(&cluster->load_lock);
-}
-
-static inline void inter_cluster_migration_fixup
-	(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
-{
-	struct rq *dest_rq = cpu_rq(new_cpu);
-	struct rq *src_rq = cpu_rq(task_cpu);
-
-	if (same_freq_domain(new_cpu, task_cpu))
-		return;
-
-	p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
-	p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
-
-	dest_rq->curr_runnable_sum += p->ravg.curr_window;
-	dest_rq->prev_runnable_sum += p->ravg.prev_window;
-
-	src_rq->curr_runnable_sum -=  p->ravg.curr_window_cpu[task_cpu];
-	src_rq->prev_runnable_sum -=  p->ravg.prev_window_cpu[task_cpu];
-
-	if (new_task) {
-		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
-		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
-
-		src_rq->nt_curr_runnable_sum -=
-				p->ravg.curr_window_cpu[task_cpu];
-		src_rq->nt_prev_runnable_sum -=
-				p->ravg.prev_window_cpu[task_cpu];
-	}
-
-	p->ravg.curr_window_cpu[task_cpu] = 0;
-	p->ravg.prev_window_cpu[task_cpu] = 0;
-
-	update_cluster_load_subtractions(p, task_cpu,
-			src_rq->window_start, new_task);
-
-	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
-	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
-	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
-	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
-}
-
-static int get_top_index(unsigned long *bitmap, unsigned long old_top)
-{
-	int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
-
-	if (index == NUM_LOAD_INDICES)
-		return 0;
-
-	return NUM_LOAD_INDICES - 1 - index;
-}
-
-static void
-migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
-{
-	int index;
-	int top_index;
-	u32 curr_window = p->ravg.curr_window;
-	u32 prev_window = p->ravg.prev_window;
-	u8 src = src_rq->curr_table;
-	u8 dst = dst_rq->curr_table;
-	u8 *src_table;
-	u8 *dst_table;
-
-	if (curr_window) {
-		src_table = src_rq->top_tasks[src];
-		dst_table = dst_rq->top_tasks[dst];
-		index = load_to_index(curr_window);
-		src_table[index] -= 1;
-		dst_table[index] += 1;
-
-		if (!src_table[index])
-			__clear_bit(NUM_LOAD_INDICES - index - 1,
-				src_rq->top_tasks_bitmap[src]);
-
-		if (dst_table[index] == 1)
-			__set_bit(NUM_LOAD_INDICES - index - 1,
-				dst_rq->top_tasks_bitmap[dst]);
-
-		if (index > dst_rq->curr_top)
-			dst_rq->curr_top = index;
-
-		top_index = src_rq->curr_top;
-		if (index == top_index && !src_table[index])
-			src_rq->curr_top = get_top_index(
-				src_rq->top_tasks_bitmap[src], top_index);
-	}
-
-	if (prev_window) {
-		src = 1 - src;
-		dst = 1 - dst;
-		src_table = src_rq->top_tasks[src];
-		dst_table = dst_rq->top_tasks[dst];
-		index = load_to_index(prev_window);
-		src_table[index] -= 1;
-		dst_table[index] += 1;
-
-		if (!src_table[index])
-			__clear_bit(NUM_LOAD_INDICES - index - 1,
-				src_rq->top_tasks_bitmap[src]);
-
-		if (dst_table[index] == 1)
-			__set_bit(NUM_LOAD_INDICES - index - 1,
-				dst_rq->top_tasks_bitmap[dst]);
-
-		if (index > dst_rq->prev_top)
-			dst_rq->prev_top = index;
-
-		top_index = src_rq->prev_top;
-		if (index == top_index && !src_table[index])
-			src_rq->prev_top = get_top_index(
-				src_rq->top_tasks_bitmap[src], top_index);
-	}
-}
-
-void fixup_busy_time(struct task_struct *p, int new_cpu)
-{
-	struct rq *src_rq = task_rq(p);
-	struct rq *dest_rq = cpu_rq(new_cpu);
-	u64 wallclock;
-	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
-	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
-	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
-	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
-	bool new_task;
-	struct related_thread_group *grp;
-
-	if (!p->on_rq && p->state != TASK_WAKING)
-		return;
-
-	if (exiting_task(p)) {
-		clear_ed_task(p, src_rq);
-		return;
-	}
-
-	if (p->state == TASK_WAKING)
-		double_rq_lock(src_rq, dest_rq);
-
-	if (sched_disable_window_stats)
-		goto done;
-
-	wallclock = sched_ktime_clock();
-
-	update_task_ravg(task_rq(p)->curr, task_rq(p),
-			 TASK_UPDATE,
-			 wallclock, 0);
-	update_task_ravg(dest_rq->curr, dest_rq,
-			 TASK_UPDATE, wallclock, 0);
-
-	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
-			 wallclock, 0);
-
-	update_task_cpu_cycles(p, new_cpu);
-
-	new_task = is_new_task(p);
-	/* Protected by rq_lock */
-	grp = p->grp;
-
-	/*
-	 * For frequency aggregation, we continue to do migration fixups
-	 * even for intra cluster migrations. This is because the aggregated
-	 * load has to be reported on a single CPU regardless.
-	 */
-	if (grp && sched_freq_aggregate) {
-		struct group_cpu_time *cpu_time;
-
-		cpu_time = &src_rq->grp_time;
-		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
-		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
-		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
-		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
-		cpu_time = &dest_rq->grp_time;
-		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
-		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
-		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
-		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
-		if (p->ravg.curr_window) {
-			*src_curr_runnable_sum -= p->ravg.curr_window;
-			*dst_curr_runnable_sum += p->ravg.curr_window;
-			if (new_task) {
-				*src_nt_curr_runnable_sum -=
-							p->ravg.curr_window;
-				*dst_nt_curr_runnable_sum +=
-							p->ravg.curr_window;
-			}
-		}
-
-		if (p->ravg.prev_window) {
-			*src_prev_runnable_sum -= p->ravg.prev_window;
-			*dst_prev_runnable_sum += p->ravg.prev_window;
-			if (new_task) {
-				*src_nt_prev_runnable_sum -=
-							p->ravg.prev_window;
-				*dst_nt_prev_runnable_sum +=
-							p->ravg.prev_window;
-			}
-		}
-	} else {
-		inter_cluster_migration_fixup(p, new_cpu,
-						task_cpu(p), new_task);
-	}
-
-	migrate_top_tasks(p, src_rq, dest_rq);
-
-	if (!same_freq_domain(new_cpu, task_cpu(p))) {
-		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
-		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
-	}
-
-	if (p == src_rq->ed_task) {
-		src_rq->ed_task = NULL;
-		if (!dest_rq->ed_task)
-			dest_rq->ed_task = p;
-	}
-
-done:
-	if (p->state == TASK_WAKING)
-		double_rq_unlock(src_rq, dest_rq);
-}
-
 #define sched_up_down_migrate_auto_update 1
 static void check_for_up_down_migrate_update(const struct cpumask *cpus)
 {
@@ -3653,426 +1470,7 @@
 	update_up_down_migrate();
 }
 
-/* Return cluster which can offer required capacity for group */
-static struct sched_cluster *best_cluster(struct related_thread_group *grp,
-					u64 total_demand, bool group_boost)
-{
-	struct sched_cluster *cluster = NULL;
-
-	for_each_sched_cluster(cluster) {
-		if (group_will_fit(cluster, grp, total_demand, group_boost))
-			return cluster;
-	}
-
-	return sched_cluster[0];
-}
-
-static void _set_preferred_cluster(struct related_thread_group *grp)
-{
-	struct task_struct *p;
-	u64 combined_demand = 0;
-	bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
-	bool group_boost = false;
-	u64 wallclock;
-
-	if (list_empty(&grp->tasks))
-		return;
-
-	wallclock = sched_ktime_clock();
-
-	/*
-	 * Wakeups of two or more related tasks could race with each other and
-	 * result in multiple calls to _set_preferred_cluster being issued
-	 * at the same time. Avoid the overhead of rechecking the preferred
-	 * cluster in such cases.
-	 */
-	if (wallclock - grp->last_update < sched_ravg_window / 10)
-		return;
-
-	list_for_each_entry(p, &grp->tasks, grp_list) {
-		if (boost_on_big && task_sched_boost(p)) {
-			group_boost = true;
-			break;
-		}
-
-		if (p->ravg.mark_start < wallclock -
-		    (sched_ravg_window * sched_ravg_hist_size))
-			continue;
-
-		combined_demand += p->ravg.demand;
-
-	}
-
-	grp->preferred_cluster = best_cluster(grp,
-			combined_demand, group_boost);
-	grp->last_update = sched_ktime_clock();
-	trace_sched_set_preferred_cluster(grp, combined_demand);
-}
-
-void set_preferred_cluster(struct related_thread_group *grp)
-{
-	raw_spin_lock(&grp->lock);
-	_set_preferred_cluster(grp);
-	raw_spin_unlock(&grp->lock);
-}
-
-#define ADD_TASK	0
-#define REM_TASK	1
-
-#define DEFAULT_CGROUP_COLOC_ID 1
-
-/*
- * Task's cpu usage is accounted in:
- *	rq->curr/prev_runnable_sum,  when its ->grp is NULL
- *	grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
- *
- * Transfer task's cpu usage between those counters when transitioning between
- * groups
- */
-static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
-				struct task_struct *p, int event)
-{
-	u64 wallclock;
-	struct group_cpu_time *cpu_time;
-	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
-	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
-	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
-	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
-	int migrate_type;
-	int cpu = cpu_of(rq);
-	bool new_task;
-	int i;
-
-	if (!sched_freq_aggregate)
-		return;
-
-	wallclock = sched_ktime_clock();
-
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
-	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
-	new_task = is_new_task(p);
-
-	cpu_time = &rq->grp_time;
-	if (event == ADD_TASK) {
-		migrate_type = RQ_TO_GROUP;
-
-		src_curr_runnable_sum = &rq->curr_runnable_sum;
-		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
-		src_prev_runnable_sum = &rq->prev_runnable_sum;
-		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
-		src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
-		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
-		src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
-		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
-		*src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
-		*src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
-		if (new_task) {
-			*src_nt_curr_runnable_sum -=
-					p->ravg.curr_window_cpu[cpu];
-			*src_nt_prev_runnable_sum -=
-					p->ravg.prev_window_cpu[cpu];
-		}
-
-		update_cluster_load_subtractions(p, cpu,
-				rq->window_start, new_task);
-
-	} else {
-		migrate_type = GROUP_TO_RQ;
-
-		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
-		dst_curr_runnable_sum = &rq->curr_runnable_sum;
-		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
-		dst_prev_runnable_sum = &rq->prev_runnable_sum;
-
-		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
-		dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
-		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-		dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
-
-		*src_curr_runnable_sum -= p->ravg.curr_window;
-		*src_prev_runnable_sum -= p->ravg.prev_window;
-		if (new_task) {
-			*src_nt_curr_runnable_sum -= p->ravg.curr_window;
-			*src_nt_prev_runnable_sum -= p->ravg.prev_window;
-		}
-
-		/*
-		 * Need to reset curr/prev windows for all CPUs, not just the
-		 * ones in the same cluster. Since inter cluster migrations
-		 * did not result in the appropriate bookkeeping, the values
-		 * per CPU would be inaccurate.
-		 */
-		for_each_possible_cpu(i) {
-			p->ravg.curr_window_cpu[i] = 0;
-			p->ravg.prev_window_cpu[i] = 0;
-		}
-	}
-
-	*dst_curr_runnable_sum += p->ravg.curr_window;
-	*dst_prev_runnable_sum += p->ravg.prev_window;
-	if (new_task) {
-		*dst_nt_curr_runnable_sum += p->ravg.curr_window;
-		*dst_nt_prev_runnable_sum += p->ravg.prev_window;
-	}
-
-	/*
-	 * When a task enters or exits a group, its curr and prev windows are
-	 * moved to a single CPU. This behavior might be sub-optimal in the
-	 * exit case, however, it saves us the overhead of handling inter
-	 * cluster migration fixups while the task is part of a related group.
-	 */
-	p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
-	p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
-
-	trace_sched_migration_update_sum(p, migrate_type, rq);
-
-	BUG_ON((s64)*src_curr_runnable_sum < 0);
-	BUG_ON((s64)*src_prev_runnable_sum < 0);
-	BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
-	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
-}
-
-static inline struct related_thread_group*
-lookup_related_thread_group(unsigned int group_id)
-{
-	return related_thread_groups[group_id];
-}
-
-int alloc_related_thread_groups(void)
-{
-	int i, ret;
-	struct related_thread_group *grp;
-
-	/* group_id = 0 is invalid as it's the special id to remove a group. */
-	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
-		grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
-		if (!grp) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		grp->id = i;
-		INIT_LIST_HEAD(&grp->tasks);
-		INIT_LIST_HEAD(&grp->list);
-		raw_spin_lock_init(&grp->lock);
-
-		related_thread_groups[i] = grp;
-	}
-
-	return 0;
-
-err:
-	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
-		grp = lookup_related_thread_group(i);
-		if (grp) {
-			kfree(grp);
-			related_thread_groups[i] = NULL;
-		} else {
-			break;
-		}
-	}
-
-	return ret;
-}
-
-static void remove_task_from_group(struct task_struct *p)
-{
-	struct related_thread_group *grp = p->grp;
-	struct rq *rq;
-	int empty_group = 1;
-	struct rq_flags rf;
-
-	raw_spin_lock(&grp->lock);
-
-	rq = __task_rq_lock(p, &rf);
-	transfer_busy_time(rq, p->grp, p, REM_TASK);
-	list_del_init(&p->grp_list);
-	rcu_assign_pointer(p->grp, NULL);
-	__task_rq_unlock(rq, &rf);
-
-	if (!list_empty(&grp->tasks)) {
-		empty_group = 0;
-		_set_preferred_cluster(grp);
-	}
-
-	raw_spin_unlock(&grp->lock);
-
-	/* Reserved groups cannot be destroyed */
-	if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
-		 /*
-		  * We test whether grp->list is attached with list_empty()
-		  * hence re-init the list after deletion.
-		  */
-		list_del_init(&grp->list);
-}
-
-static int
-add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
-{
-	struct rq *rq;
-	struct rq_flags rf;
-
-	raw_spin_lock(&grp->lock);
-
-	/*
-	 * Change p->grp under rq->lock. Will prevent races with read-side
-	 * reference of p->grp in various hot-paths
-	 */
-	rq = __task_rq_lock(p, &rf);
-	transfer_busy_time(rq, grp, p, ADD_TASK);
-	list_add(&p->grp_list, &grp->tasks);
-	rcu_assign_pointer(p->grp, grp);
-	__task_rq_unlock(rq, &rf);
-
-	_set_preferred_cluster(grp);
-
-	raw_spin_unlock(&grp->lock);
-
-	return 0;
-}
-
-void add_new_task_to_grp(struct task_struct *new)
-{
-	unsigned long flags;
-	struct related_thread_group *grp;
-	struct task_struct *leader = new->group_leader;
-	unsigned int leader_grp_id = sched_get_group_id(leader);
-
-	if (!sysctl_sched_enable_thread_grouping &&
-	    leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
-		return;
-
-	if (thread_group_leader(new))
-		return;
-
-	if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
-		if (!same_schedtune(new, leader))
-			return;
-	}
-
-	write_lock_irqsave(&related_thread_group_lock, flags);
-
-	rcu_read_lock();
-	grp = task_related_thread_group(leader);
-	rcu_read_unlock();
-
-	/*
-	 * It's possible that someone already added the new task to the
-	 * group. A leader's thread group is updated prior to calling
-	 * this function. It's also possible that the leader has exited
-	 * the group. In either case, there is nothing else to do.
-	 */
-	if (!grp || new->grp) {
-		write_unlock_irqrestore(&related_thread_group_lock, flags);
-		return;
-	}
-
-	raw_spin_lock(&grp->lock);
-
-	rcu_assign_pointer(new->grp, grp);
-	list_add(&new->grp_list, &grp->tasks);
-
-	raw_spin_unlock(&grp->lock);
-	write_unlock_irqrestore(&related_thread_group_lock, flags);
-}
-
-static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
-{
-	int rc = 0;
-	unsigned long flags;
-	struct related_thread_group *grp = NULL;
-
-	if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
-		return -EINVAL;
-
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	write_lock(&related_thread_group_lock);
-
-	/* Switching from one group to another directly is not permitted */
-	if ((current != p && p->flags & PF_EXITING) ||
-			(!p->grp && !group_id) ||
-			(p->grp && group_id))
-		goto done;
-
-	if (!group_id) {
-		remove_task_from_group(p);
-		goto done;
-	}
-
-	grp = lookup_related_thread_group(group_id);
-	if (list_empty(&grp->list))
-		list_add(&grp->list, &active_related_thread_groups);
-
-	rc = add_task_to_group(p, grp);
-done:
-	write_unlock(&related_thread_group_lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-	return rc;
-}
-
-int sched_set_group_id(struct task_struct *p, unsigned int group_id)
-{
-	/* DEFAULT_CGROUP_COLOC_ID is a reserved id */
-	if (group_id == DEFAULT_CGROUP_COLOC_ID)
-		return -EINVAL;
-
-	return __sched_set_group_id(p, group_id);
-}
-
-unsigned int sched_get_group_id(struct task_struct *p)
-{
-	unsigned int group_id;
-	struct related_thread_group *grp;
-
-	rcu_read_lock();
-	grp = task_related_thread_group(p);
-	group_id = grp ? grp->id : 0;
-	rcu_read_unlock();
-
-	return group_id;
-}
-
-#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
-/*
- * We create a default colocation group at boot. There is no need to
- * synchronize tasks between cgroups at creation time because the
- * correct cgroup hierarchy is not available at boot. Therefore cgroup
- * colocation is turned off by default even though the colocation group
- * itself has been allocated. Furthermore this colocation group cannot
- * be destroyted once it has been created. All of this has been as part
- * of runtime optimizations.
- *
- * The job of synchronizing tasks to the colocation group is done when
- * the colocation flag in the cgroup is turned on.
- */
-static int __init create_default_coloc_group(void)
-{
-	struct related_thread_group *grp = NULL;
-	unsigned long flags;
-
-	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
-	write_lock_irqsave(&related_thread_group_lock, flags);
-	list_add(&grp->list, &active_related_thread_groups);
-	write_unlock_irqrestore(&related_thread_group_lock, flags);
-
-	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
-	return 0;
-}
-late_initcall(create_default_coloc_group);
-
-int sync_cgroup_colocation(struct task_struct *p, bool insert)
-{
-	unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
-
-	return __sched_set_group_id(p, grp_id);
-}
-#endif
-
-static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+void update_cpu_cluster_capacity(const cpumask_t *cpus)
 {
 	int i;
 	struct sched_cluster *cluster;
@@ -4120,66 +1518,6 @@
 		update_cpu_cluster_capacity(cpus);
 }
 
-static int cpufreq_notifier_policy(struct notifier_block *nb,
-		unsigned long val, void *data)
-{
-	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
-	struct sched_cluster *cluster = NULL;
-	struct cpumask policy_cluster = *policy->related_cpus;
-	unsigned int orig_max_freq = 0;
-	int i, j, update_capacity = 0;
-
-	if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
-						val != CPUFREQ_CREATE_POLICY)
-		return 0;
-
-	if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
-		update_min_max_capacity();
-		return 0;
-	}
-
-	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
-	if (min_max_freq == 1)
-		min_max_freq = UINT_MAX;
-	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
-	BUG_ON(!min_max_freq);
-	BUG_ON(!policy->max);
-
-	for_each_cpu(i, &policy_cluster) {
-		cluster = cpu_rq(i)->cluster;
-		cpumask_andnot(&policy_cluster, &policy_cluster,
-						&cluster->cpus);
-
-		orig_max_freq = cluster->max_freq;
-		cluster->min_freq = policy->min;
-		cluster->max_freq = policy->max;
-		cluster->cur_freq = policy->cur;
-
-		if (!cluster->freq_init_done) {
-			mutex_lock(&cluster_lock);
-			for_each_cpu(j, &cluster->cpus)
-				cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
-						policy->related_cpus);
-			cluster->max_possible_freq = policy->cpuinfo.max_freq;
-			cluster->max_possible_capacity =
-				compute_max_possible_capacity(cluster);
-			cluster->freq_init_done = true;
-
-			sort_clusters();
-			update_all_clusters_stats();
-			mutex_unlock(&cluster_lock);
-			continue;
-		}
-
-		update_capacity += (orig_max_freq != cluster->max_freq);
-	}
-
-	if (update_capacity)
-		update_cpu_cluster_capacity(policy->related_cpus);
-
-	return 0;
-}
-
 static int cpufreq_notifier_trans(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
@@ -4232,10 +1570,6 @@
 	return 0;
 }
 
-static struct notifier_block notifier_policy_block = {
-	.notifier_call = cpufreq_notifier_policy
-};
-
 static struct notifier_block notifier_trans_block = {
 	.notifier_call = cpufreq_notifier_trans
 };
@@ -4251,14 +1585,8 @@
 
 static int register_sched_callback(void)
 {
-	int ret;
-
-	ret = cpufreq_register_notifier(&notifier_policy_block,
-						CPUFREQ_POLICY_NOTIFIER);
-
-	if (!ret)
-		ret = cpufreq_register_notifier(&notifier_trans_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
+	cpufreq_register_notifier(&notifier_trans_block,
+				  CPUFREQ_TRANSITION_NOTIFIER);
 
 	register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);
 
@@ -4273,49 +1601,6 @@
  */
 core_initcall(register_sched_callback);
 
-int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load)
-{
-	u32 new_load = task_load(p);
-
-	if (!grp)
-		return 0;
-
-	/*
-	 * Update if task's load has changed significantly or a complete window
-	 * has passed since we last updated preference
-	 */
-	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
-		sched_ktime_clock() - grp->last_update > sched_ravg_window)
-		return 1;
-
-	return 0;
-}
-
-bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
-	struct task_struct *p;
-	int loop_max = 10;
-
-	if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
-		return 0;
-
-	rq->ed_task = NULL;
-	list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
-		if (!loop_max)
-			break;
-
-		if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
-			rq->ed_task = p;
-			return 1;
-		}
-
-		loop_max--;
-	}
-
-	return 0;
-}
-
 void update_avg_burst(struct task_struct *p)
 {
 	update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 13c8818..b852cbe 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,7 +78,7 @@
 {
 }
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 static void
 fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
 			   u32 new_task_load, u32 new_pred_demand)
@@ -114,7 +114,7 @@
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
 	.update_curr		= update_curr_idle,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_idle,
 #endif
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 709f719..2703e0d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -4,12 +4,13 @@
  */
 
 #include "sched.h"
+#include "walt.h"
 
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 
 static void
 inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
@@ -37,6 +38,7 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_SCHED_HMP
 static int
 select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
@@ -50,8 +52,9 @@
 
 	return cpu;
 }
+#endif /* CONFIG_SCHED_HMP */
 #endif /* CONFIG_SMP */
-#else  /* CONFIG_SCHED_HMP */
+#else  /* CONFIG_SCHED_WALT */
 
 static inline void
 inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
@@ -1527,9 +1530,10 @@
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (curr && unlikely(rt_task(curr)) &&
-	    (tsk_nr_cpus_allowed(curr) < 2 ||
-	     curr->prio <= p->prio)) {
+	if (energy_aware() ||
+	    (curr && unlikely(rt_task(curr)) &&
+	     (tsk_nr_cpus_allowed(curr) < 2 ||
+	      curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
@@ -1820,12 +1824,35 @@
 }
 #endif	/* CONFIG_SCHED_HMP */
 
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+		u64 demand = p->ravg.demand;
+
+		return (demand << 10) / sched_ravg_window;
+	}
+#endif
+	return p->se.avg.util_avg;
+}
+
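The task_util() helper above maps a task's WALT demand (nanoseconds of execution per window) onto the 0..1024 capacity scale. A standalone sketch of that conversion, assuming a 20 ms window for illustration (demand_to_util is a hypothetical mirror of the expression, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Mirrors task_util(): util = (demand << 10) / sched_ravg_window. */
static uint64_t demand_to_util(uint64_t demand_ns, uint64_t window_ns)
{
	return (demand_ns << 10) / window_ns;
}

int main(void)
{
	/* A task that ran 5 ms of a 20 ms window maps to 256 out of 1024. */
	printf("%llu\n",
	       (unsigned long long)demand_to_util(5000000ULL, 20000000ULL));
	return 0;
}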
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
+	struct sched_group *sg, *sg_target;
 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
-	int cpu      = task_cpu(task);
+	int cpu, best_cpu;
+	struct cpumask search_cpu, backup_search_cpu;
+	unsigned long cpu_capacity;
+	unsigned long best_capacity;
+	unsigned long util, best_cpu_util = ULONG_MAX;
+	int best_cpu_idle_idx = INT_MAX;
+	int cpu_idle_idx = -1;
+	long new_util_cum;
+	int max_spare_cap_cpu = -1;
+	long max_spare_cap = -LONG_MAX;
+	bool placement_boost;
 
 #ifdef CONFIG_SCHED_HMP
 	return find_lowest_rq_hmp(task);
@@ -1841,6 +1868,117 @@
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 		return -1; /* No targets found */
 
+	if (energy_aware() && sysctl_sched_is_big_little) {
+		sg_target = NULL;
+		best_cpu = -1;
+
+		/*
+		 * Since this code is inside sched_is_big_little, we are going
+		 * to assume that boost policy is SCHED_BOOST_ON_BIG
+		 */
+		placement_boost = sched_boost() == FULL_THROTTLE_BOOST;
+		best_capacity = placement_boost ? 0 : ULONG_MAX;
+
+		rcu_read_lock();
+		sd = rcu_dereference(per_cpu(sd_ea, task_cpu(task)));
+		if (!sd) {
+			rcu_read_unlock();
+			goto noea;
+		}
+
+		sg = sd->groups;
+		do {
+			cpu = group_first_cpu(sg);
+			cpu_capacity = capacity_orig_of(cpu);
+
+			if (unlikely(placement_boost)) {
+				if (cpu_capacity > best_capacity) {
+					best_capacity = cpu_capacity;
+					sg_target = sg;
+				}
+			} else {
+				if (cpu_capacity < best_capacity) {
+					best_capacity = cpu_capacity;
+					sg_target = sg;
+				}
+			}
+		} while (sg = sg->next, sg != sd->groups);
+		rcu_read_unlock();
+
+		cpumask_and(&search_cpu, lowest_mask,
+			    sched_group_cpus(sg_target));
+		cpumask_copy(&backup_search_cpu, lowest_mask);
+		cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
+			       &search_cpu);
+
+retry:
+		for_each_cpu(cpu, &search_cpu) {
+			/*
+			 * Don't use capacity_curr_of() since it will
+			 * double count rt task load.
+			 */
+			util = cpu_util(cpu);
+			if (!cpu_overutilized(cpu)) {
+				if (cpu_isolated(cpu))
+					continue;
+
+				if (sched_cpu_high_irqload(cpu))
+					continue;
+
+				new_util_cum = cpu_util_cum(cpu, 0);
+
+				if (!task_in_cum_window_demand(cpu_rq(cpu),
+							       task))
+					new_util_cum += task_util(task);
+
+				trace_sched_cpu_util(task, cpu, task_util(task),
+						     0, new_util_cum, 0);
+
+				if (sysctl_sched_cstate_aware)
+					cpu_idle_idx =
+					    (cpu == smp_processor_id() ||
+					     cpu_rq(cpu)->nr_running) ?
+					     -1 :
+					     idle_get_state_idx(cpu_rq(cpu));
+
+				if (add_capacity_margin(new_util_cum) <
+				    capacity_curr_of(cpu)) {
+					if (cpu_idle_idx < best_cpu_idle_idx ||
+					    (best_cpu != task_cpu(task) &&
+					     (best_cpu_idle_idx ==
+					      cpu_idle_idx &&
+					      best_cpu_util > util))) {
+						best_cpu_util = util;
+						best_cpu = cpu;
+						best_cpu_idle_idx =
+						    cpu_idle_idx;
+					}
+				} else {
+					long spare_cap = capacity_of(cpu) -
+							 util;
+
+					if (spare_cap > 0 &&
+					    max_spare_cap < spare_cap) {
+						max_spare_cap_cpu = cpu;
+						max_spare_cap = spare_cap;
+					}
+				}
+			}
+		}
+
+		if (best_cpu != -1) {
+			return best_cpu;
+		} else if (max_spare_cap_cpu != -1) {
+			return max_spare_cap_cpu;
+		} else if (!cpumask_empty(&backup_search_cpu)) {
+			cpumask_copy(&search_cpu, &backup_search_cpu);
+			cpumask_clear(&backup_search_cpu);
+			goto retry;
+		}
+	}
+
+noea:
+	cpu = task_cpu(task);
 	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system.  Now we want to elect
@@ -2563,7 +2701,7 @@
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_rt,
 #endif
 };
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5e25011..29b6e3d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,9 +25,8 @@
 struct rq;
 struct cpuidle_state;
 
-#ifdef CONFIG_SCHED_HMP
-#define NUM_TRACKED_WINDOWS 2
-#define NUM_LOAD_INDICES 1000
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sched_ravg_window;
 
 struct hmp_sched_stats {
 	int nr_big_tasks;
@@ -35,10 +34,9 @@
 	u64 pred_demands_sum;
 };
 
-struct load_subtractions {
-	u64 window_start;
-	u64 subs;
-	u64 new_subs;
+struct cpu_cycle {
+	u64 cycles;
+	u64 time;
 };
 
 struct group_cpu_time {
@@ -48,6 +46,15 @@
 	u64 nt_prev_runnable_sum;
 };
 
+struct load_subtractions {
+	u64 window_start;
+	u64 subs;
+	u64 new_subs;
+};
+
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
 struct sched_cluster {
 	raw_spinlock_t load_lock;
 	struct list_head list;
@@ -72,15 +79,13 @@
 	unsigned int static_cluster_pwr_cost;
 	int notifier_sent;
 	bool wake_up_idle;
-};
-
-struct cpu_cycle {
-	u64 cycles;
-	u64 time;
+	u64 aggr_grp_load;
 };
 
 extern unsigned int sched_disable_window_stats;
-#endif /* CONFIG_SCHED_HMP */
+
+extern struct timer_list sched_grp_timer;
+#endif /* CONFIG_SCHED_WALT */
 
 
 /* task_struct::on_rq states: */
@@ -507,7 +512,7 @@
 #endif
 
 #ifdef CONFIG_CFS_BANDWIDTH
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	struct hmp_sched_stats hmp_stats;
 #endif
 
@@ -764,13 +769,14 @@
 	u64 max_idle_balance_cost;
 #endif
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	struct sched_cluster *cluster;
 	struct cpumask freq_domain_cpumask;
 	struct hmp_sched_stats hmp_stats;
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
+	s64 cum_window_start;
 	u64 load_reported_window;
 	unsigned long hmp_flags;
 
@@ -786,6 +792,7 @@
 	u64 prev_runnable_sum;
 	u64 nt_curr_runnable_sum;
 	u64 nt_prev_runnable_sum;
+	u64 cum_window_demand;
 	struct group_cpu_time grp_time;
 	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
 	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
@@ -794,6 +801,7 @@
 	u8 curr_table;
 	int prev_top;
 	int curr_top;
+	struct irq_work irq_work;
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1421,7 +1429,7 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_change_group) (struct task_struct *p, int type);
 #endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
 				      u32 new_task_load, u32 new_pred_demand);
 #endif
@@ -1458,6 +1466,8 @@
 
 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
+bool cpu_overutilized(int cpu);
+
 #endif
 
 #ifdef CONFIG_CPU_IDLE
@@ -1702,7 +1712,6 @@
 }
 
 extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int walt_ravg_window;
 extern unsigned int walt_disabled;
 
 /*
@@ -1733,13 +1742,14 @@
  */
 static inline unsigned long __cpu_util(int cpu, int delta)
 {
-	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-		do_div(util, walt_ravg_window);
+		util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg;
+		util = div64_u64(util,
+				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
 	}
 #endif
 	delta += util;
@@ -1754,11 +1764,76 @@
 	return __cpu_util(cpu, 0);
 }
 
+struct sched_walt_cpu_load {
+	unsigned long prev_window_util;
+	unsigned long nl;
+	unsigned long pl;
+};
+
+static inline unsigned long cpu_util_cum(int cpu, int delta)
+{
+	u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+		util = cpu_rq(cpu)->cum_window_demand;
+		util = div64_u64(util,
+				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+	}
 #endif
+	delta += util;
+	if (delta < 0)
+		return 0;
+
+	return (delta >= capacity) ? capacity : delta;
+}
+
+#ifdef CONFIG_SCHED_WALT
+u64 freq_policy_load(struct rq *rq);
+#endif
+
+static inline unsigned long
+cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 util = rq->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+
+		util = freq_policy_load(rq);
+		util = div64_u64(util,
+				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+		if (walt_load) {
+			u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
+				rq->grp_time.nt_prev_runnable_sum;
+
+			nl = div64_u64(nl, sched_ravg_window >>
+						SCHED_CAPACITY_SHIFT);
+
+			walt_load->prev_window_util = util;
+			walt_load->nl = nl;
+		}
+	}
+#endif
+	return (util >= capacity) ? capacity : util;
+}
+#endif
+
+extern unsigned int capacity_margin_freq;
+
+static inline unsigned long add_capacity_margin(unsigned long cpu_capacity)
+{
+	cpu_capacity  = cpu_capacity * capacity_margin_freq;
+	cpu_capacity /= SCHED_CAPACITY_SCALE;
+	return cpu_capacity;
+}
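add_capacity_margin() inflates a projected utilization before find_lowest_rq() compares it against capacity_curr_of(cpu), so a CPU is only picked when it has headroom left. A standalone sketch of the check, assuming capacity_margin_freq = 1280 (a 25% margin; the actual tunable value may differ):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

/* Assumed margin value for illustration only. */
static unsigned long capacity_margin_freq = 1280;

static unsigned long add_capacity_margin(unsigned long cpu_capacity)
{
	cpu_capacity  = cpu_capacity * capacity_margin_freq;
	cpu_capacity /= SCHED_CAPACITY_SCALE;
	return cpu_capacity;
}

int main(void)
{
	/* A projected utilization of 800 against a current capacity of 1000. */
	unsigned long util = 800, capacity_curr = 1000;

	printf("fits: %d\n", add_capacity_margin(util) < capacity_curr);
	return 0;
}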
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED
 #define capacity_max SCHED_CAPACITY_SCALE
-extern unsigned int capacity_margin;
 extern struct static_key __sched_freq;
 
 static inline bool sched_freq(void)
@@ -2144,7 +2219,7 @@
 {
 	struct update_util_data *data;
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
 	 * migration
@@ -2156,9 +2231,10 @@
 	rq->load_reported_window = rq->window_start;
 #endif
 
-	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+					cpu_of(rq)));
 	if (data)
-		data->func(data, rq_clock(rq), flags);
+		data->func(data, sched_clock(), flags);
 }
 
 static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
@@ -2179,7 +2255,7 @@
 #define arch_scale_freq_invariant()	(false)
 #endif
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 
 static inline int cluster_first_cpu(struct sched_cluster *cluster)
 {
@@ -2217,11 +2293,9 @@
 #define IRQLOAD_MIGRATION	3
 
 extern struct mutex policy_mutex;
-extern unsigned int sched_ravg_window;
 extern unsigned int sched_disable_window_stats;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
-extern unsigned int pct_task_load(struct task_struct *p);
 extern unsigned int max_possible_efficiency;
 extern unsigned int min_possible_efficiency;
 extern unsigned int max_capacity;
@@ -2245,37 +2319,14 @@
 extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int  __read_mostly sched_load_granule;
 
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
 extern u64 sched_ktime_clock(void);
-extern int got_boost_kick(void);
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
-						u64 wallclock, u64 irqtime);
-extern bool early_detection_notify(struct rq *rq, u64 wallclock);
-extern void clear_ed_task(struct task_struct *p, struct rq *rq);
-extern void fixup_busy_time(struct task_struct *p, int new_cpu);
-extern void clear_boost_kick(int cpu);
-extern void clear_hmp_request(int cpu);
-extern void mark_task_starting(struct task_struct *p);
-extern void set_window_start(struct rq *rq);
-extern void update_cluster_topology(void);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-extern void init_clusters(void);
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern unsigned int max_task_load(void);
-extern void sched_account_irqtime(int cpu, struct task_struct *curr,
-				 u64 delta, u64 wallclock);
-extern void sched_account_irqstart(int cpu, struct task_struct *curr,
-				   u64 wallclock);
-extern unsigned int cpu_temp(int cpu);
-extern unsigned int nr_eligible_big_tasks(int cpu);
 extern int update_preferred_cluster(struct related_thread_group *grp,
 			struct task_struct *p, u32 old_load);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
 extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg_burst(struct task_struct *p);
 extern void update_avg(u64 *avg, u64 sample);
 
 #define NO_BOOST 0
@@ -2283,11 +2334,6 @@
 #define CONSERVATIVE_BOOST 2
 #define RESTRAINED_BOOST 3
 
-static inline struct sched_cluster *cpu_cluster(int cpu)
-{
-	return cpu_rq(cpu)->cluster;
-}
-
 static inline int cpu_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->capacity;
@@ -2308,11 +2354,6 @@
 	return cpu_rq(cpu)->cluster->efficiency;
 }
 
-static inline unsigned int cpu_cur_freq(int cpu)
-{
-	return cpu_rq(cpu)->cluster->cur_freq;
-}
-
 static inline unsigned int cpu_min_freq(int cpu)
 {
 	return cpu_rq(cpu)->cluster->min_freq;
@@ -2338,9 +2379,60 @@
 	return cpu_rq(cpu)->cluster->max_possible_freq;
 }
 
-static inline int same_cluster(int src_cpu, int dst_cpu)
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static inline void __update_min_max_capacity(void)
 {
-	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+	int i;
+	int max_cap = 0, min_cap = INT_MAX;
+
+	for_each_online_cpu(i) {
+		max_cap = max(max_cap, cpu_capacity(i));
+		min_cap = min(min_cap, cpu_capacity(i));
+	}
+
+	max_capacity = max_cap;
+	min_capacity = min_cap;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static inline unsigned long
+load_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_efficiency,
+			    cluster->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_freq,
+			   cluster_max_freq(cluster));
+}
+
+static inline int compute_load_scale_factor(struct sched_cluster *cluster)
+{
+	int load_scale = 1024;
+
+	/*
+	 * load_scale_factor accounts for the fact that task load
+	 * is in reference to "best" performing cpu. Task's load will need to be
+	 * scaled (up) by a factor to determine suitability to be placed on a
+	 * (little) cpu.
+	 */
+	load_scale *= load_scale_cpu_efficiency(cluster);
+	load_scale >>= 10;
+
+	load_scale *= load_scale_cpu_freq(cluster);
+	load_scale >>= 10;
+
+	return load_scale;
 }
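compute_load_scale_factor() composes the two ratios above, so a cluster that is both less efficient and lower-clocked than the best CPU has task load scaled up multiplicatively before placement decisions. A standalone sketch of the arithmetic with assumed efficiency and frequency numbers (the helper and its inputs are illustrative, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors compute_load_scale_factor(): efficiency ratio, then max_freq ratio. */
static int compute_load_scale(unsigned int efficiency,
			      unsigned int max_possible_efficiency,
			      unsigned int cluster_max_freq,
			      unsigned int max_possible_freq)
{
	int load_scale = 1024;

	load_scale *= DIV_ROUND_UP(1024 * max_possible_efficiency, efficiency);
	load_scale >>= 10;

	load_scale *= DIV_ROUND_UP(1024 * max_possible_freq, cluster_max_freq);
	load_scale >>= 10;

	return load_scale;
}

int main(void)
{
	/*
	 * A little cluster at half the efficiency of the big cluster and
	 * 1.5 GHz vs 2.5 GHz: load is scaled up by roughly 3.3x (3414/1024).
	 */
	printf("%d\n", compute_load_scale(512, 1024, 1500000, 2500000));
	return 0;
}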
 
 static inline int cpu_max_power_cost(int cpu)
@@ -2353,11 +2445,6 @@
 	return cpu_rq(cpu)->cluster->min_power_cost;
 }
 
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
-{
-	return div64_u64(cycles, period);
-}
-
 static inline bool hmp_capable(void)
 {
 	return max_possible_capacity != min_max_possible_capacity;
@@ -2380,91 +2467,49 @@
 	return task_load;
 }
 
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long
+capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return (1024 * cluster->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
+}
+
+static inline int compute_capacity(struct sched_cluster *cluster)
+{
+	int capacity = 1024;
+
+	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity >>= 10;
+
+	capacity *= capacity_scale_cpu_freq(cluster);
+	capacity >>= 10;
+
+	return capacity;
+}
+
 static inline unsigned int task_load(struct task_struct *p)
 {
 	return p->ravg.demand;
 }
 
-static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				 struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg += task_load;
-	stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg -= task_load;
-
-	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
-	stats->pred_demands_sum -= p->ravg.pred_demand;
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, s64 task_load_delta,
-			      s64 pred_demand_delta)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	stats->cumulative_runnable_avg += task_load_delta;
-	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
-	stats->pred_demands_sum += pred_demand_delta;
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
 #define pct_to_real(tunable)	\
 		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
 
 #define real_to_pct(tunable)	\
 		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
 
-#define SCHED_HIGH_IRQ_TIMEOUT 3
-static inline u64 sched_irqload(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	s64 delta;
-
-	delta = get_jiffies_64() - rq->irqload_ts;
-	/*
-	 * Current context can be preempted by irq and rq->irqload_ts can be
-	 * updated by irq context so that delta can be negative.
-	 * But this is okay and we can safely return as this means there
-	 * was recent irq occurrence.
-	 */
-
-	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
-		return rq->avg_irqload;
-	else
-		return 0;
-}
-
-static inline int sched_cpu_high_irqload(int cpu)
-{
-	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
-}
-
 static inline bool task_in_related_thread_group(struct task_struct *p)
 {
 	return !!(rcu_access_pointer(p->grp) != NULL);
@@ -2478,12 +2523,6 @@
 
 #define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
 
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-
-extern void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p);
-
 /* Is frequency of two cpus synchronized with each other? */
 static inline int same_freq_domain(int src_cpu, int dst_cpu)
 {
@@ -2498,28 +2537,6 @@
 #define	BOOST_KICK	0
 #define	CPU_RESERVED	1
 
-static inline int is_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	return test_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline int mark_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	/* Name boost_flags as hmp_flags? */
-	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline void clear_reserved(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
 static inline u64 cpu_cravg_sync(int cpu, int sync)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -2542,43 +2559,24 @@
 	return load;
 }
 
-static inline bool is_short_burst_task(struct task_struct *p)
-{
-	return p->ravg.avg_burst < sysctl_sched_short_burst &&
-	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
 extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern unsigned int power_cost(int cpu, u64 demand);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
 extern int sched_boost(void);
 extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
 					enum sched_boost_policy boost_policy);
-extern enum sched_boost_policy sched_boost_policy(void);
 extern int task_will_fit(struct task_struct *p, int cpu);
 extern u64 cpu_load(int cpu);
 extern u64 cpu_load_sync(int cpu, int sync);
 extern int preferred_cluster(struct sched_cluster *cluster,
 						struct task_struct *p);
-extern void inc_nr_big_task(struct hmp_sched_stats *stats,
-					struct task_struct *p);
-extern void dec_nr_big_task(struct hmp_sched_stats *stats,
-					struct task_struct *p);
 extern void inc_rq_hmp_stats(struct rq *rq,
 				struct task_struct *p, int change_cra);
 extern void dec_rq_hmp_stats(struct rq *rq,
 				struct task_struct *p, int change_cra);
 extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int is_big_task(struct task_struct *p);
 extern int upmigrate_discouraged(struct task_struct *p);
 extern struct sched_cluster *rq_cluster(struct rq *rq);
 extern int nr_big_tasks(struct rq *rq);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
-					struct task_struct *p, s64 delta);
 extern void reset_task_stats(struct task_struct *p);
 extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
 extern void inc_hmp_sched_stats_fair(struct rq *rq,
@@ -2587,7 +2585,6 @@
 					struct cftype *cft);
 extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
 				struct cftype *cft, u64 upmigrate_discourage);
-extern void sched_boost_parse_dt(void);
 extern void clear_top_tasks_bitmap(unsigned long *bitmap);
 
 #if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
@@ -2617,57 +2614,85 @@
 
 extern unsigned long all_cluster_ids[];
 
-#else	/* CONFIG_SCHED_HMP */
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+
+static inline int is_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline bool
+__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+	return (p->on_rq || p->last_sleep_ts >= rq->window_start);
+}
+
+static inline bool
+task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+	return cpu_of(rq) == task_cpu(p) && __task_in_cum_window_demand(rq, p);
+}
+
+static inline void
+dec_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+	rq->cum_window_demand -= p->ravg.demand;
+	WARN_ON_ONCE(rq->cum_window_demand < 0);
+}
+
+static inline void
+inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
+{
+	rq->cum_window_demand += delta;
+}
+
+extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
+
+extern unsigned long thermal_cap(int cpu);
+
+extern void clear_hmp_request(int cpu);
+
+extern int got_boost_kick(void);
+extern void clear_boost_kick(int cpu);
+extern enum sched_boost_policy sched_boost_policy(void);
+extern void sched_boost_parse_dt(void);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+
+#else	/* CONFIG_SCHED_WALT */
 
 struct hmp_sched_stats;
 struct related_thread_group;
 struct sched_cluster;
 
-static inline enum sched_boost_policy sched_boost_policy(void)
-{
-	return SCHED_BOOST_NONE;
-}
-
 static inline bool task_sched_boost(struct task_struct *p)
 {
 	return true;
 }
 
-static inline int got_boost_kick(void)
-{
-	return 0;
-}
-
-static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
-				int event, u64 wallclock, u64 irqtime) { }
-
-static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
-	return 0;
-}
-
-static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
-static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static inline void clear_boost_kick(int cpu) { }
-static inline void clear_hmp_request(int cpu) { }
-static inline void mark_task_starting(struct task_struct *p) { }
-static inline void set_window_start(struct rq *rq) { }
-static inline void init_clusters(void) {}
-static inline void update_cluster_topology(void) { }
-static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void set_task_last_switch_out(struct task_struct *p,
-					    u64 wallclock) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 
 static inline int task_will_fit(struct task_struct *p, int cpu)
 {
 	return 1;
 }
 
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
 static inline int sched_boost(void)
 {
 	return 0;
@@ -2693,11 +2718,6 @@
 	return 0;
 }
 
-static inline unsigned int cpu_temp(int cpu)
-{
-	return 0;
-}
-
 static inline void
 inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
 
@@ -2715,51 +2735,16 @@
 	return NULL;
 }
 
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
-{
-}
-
 static inline u64 scale_load_to_cpu(u64 load, int cpu)
 {
 	return load;
 }
 
-static inline unsigned int nr_eligible_big_tasks(int cpu)
-{
-	return 0;
-}
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
 static inline int cpu_capacity(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 
-static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
-
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-		 struct task_struct *p)
-{
-}
-
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-		 struct task_struct *p)
-{
-}
-
-static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
-				 u64 delta, u64 wallclock)
-{
-}
-
-static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
-					  u64 wallclock)
-{
-}
-
-static inline int sched_cpu_high_irqload(int cpu) { return 0; }
-
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
 
 static inline bool task_in_related_thread_group(struct task_struct *p)
@@ -2785,24 +2770,12 @@
 
 #define PRED_DEMAND_DELTA (0)
 
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p) { }
-
 static inline int same_freq_domain(int src_cpu, int dst_cpu)
 {
 	return 1;
 }
 
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline void pre_big_task_count_change(void) { }
-static inline void post_big_task_count_change(void) { }
-static inline void set_hmp_defaults(void) { }
-
 static inline void clear_reserved(int cpu) { }
-static inline void sched_boost_parse_dt(void) {}
 static inline int alloc_related_thread_groups(void) { return 0; }
 
 #define trace_sched_cpu_load(...)
@@ -2810,6 +2783,140 @@
 #define trace_sched_cpu_load_cgroup(...)
 #define trace_sched_cpu_load_wakeup(...)
 
-static inline void update_avg_burst(struct task_struct *p) {}
+static inline bool
+__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+	return 0;
+}
 
-#endif	/* CONFIG_SCHED_HMP */
+static inline void
+dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
+
+static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
+
+#ifdef CONFIG_SMP
+static inline unsigned long thermal_cap(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_orig;
+}
+#endif
+
+static inline void clear_hmp_request(int cpu) { }
+
+static inline int got_boost_kick(void)
+{
+	return 0;
+}
+
+static inline void clear_boost_kick(int cpu) { }
+
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+	return SCHED_BOOST_NONE;
+}
+
+static inline void sched_boost_parse_dt(void) { }
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_SCHED_WALT */
+
+#ifdef CONFIG_SCHED_HMP
+#define energy_aware() false
+
+extern int is_big_task(struct task_struct *p);
+extern unsigned int pct_task_load(struct task_struct *p);
+extern void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p);
+extern void note_task_waking(struct task_struct *p, u64 wallclock);
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+					struct task_struct *p, s64 delta);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern unsigned int cpu_temp(int cpu);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern void update_avg_burst(struct task_struct *p);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+
+extern unsigned int nr_eligible_big_tasks(int cpu);
+
+static inline void
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks++;
+}
+
+static inline void
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks--;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+static inline bool is_short_burst_task(struct task_struct *p)
+{
+	return p->ravg.avg_burst < sysctl_sched_short_burst &&
+	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
+}
+
+#else
+static inline bool energy_aware(void)
+{
+	return sched_feat(ENERGY_AWARE);
+}
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p) { }
+
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+				      struct task_struct *p, s64 delta) { }
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+	return 0;
+}
+
+static inline void pre_big_task_count_change(const struct cpumask *cpus) { }
+
+static inline void post_big_task_count_change(const struct cpumask *cpus) { }
+
+static inline void set_hmp_defaults(void) { }
+
+static inline void update_avg_burst(struct task_struct *p) { }
+
+static inline void set_task_last_switch_out(struct task_struct *p,
+					    u64 wallclock) { }
+
+#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 29d8a26..f820094 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/math64.h>
 
 #include "sched.h"
+#include "walt.h"
 #include <trace/events/sched.h>
 
 static DEFINE_PER_CPU(u64, nr_prod_sum);
@@ -127,3 +128,35 @@
 	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 }
 EXPORT_SYMBOL(sched_update_nr_prod);
+
+/*
+ * Returns the CPU utilization % in the last window.
+ */
+unsigned int sched_get_cpu_util(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 util = 0;
+	unsigned long capacity = SCHED_CAPACITY_SCALE, flags;
+	unsigned int busy;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+
+#ifdef CONFIG_SMP
+	util = rq->cfs.avg.util_avg;
+	capacity = capacity_orig_of(cpu);
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+		util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+		util = div64_u64(util,
+				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+	}
+#endif
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	util = (util >= capacity) ? capacity : util;
+	busy = (util * 100) / capacity;
+	return busy;
+}
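sched_get_cpu_util() clamps the per-window utilization to the CPU's capacity before reporting it as a percentage. A standalone sketch of that final clamp-and-scale step (the utilization is assumed to be already expressed in capacity units):

#include <stdio.h>
#include <stdint.h>

/* Clamp utilization to capacity, then express it as a busy percentage. */
static unsigned int busy_percent(uint64_t util, uint64_t capacity)
{
	util = (util >= capacity) ? capacity : util;
	return (unsigned int)((util * 100) / capacity);
}

int main(void)
{
	printf("%u%%\n", busy_percent(768, 1024));	/* 75% */
	printf("%u%%\n", busy_percent(1500, 1024));	/* clamped to 100% */
	return 0;
}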
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index a440769..dcc4a36 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -18,7 +18,7 @@
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 
 static void
 inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
@@ -43,7 +43,7 @@
 				      pred_demand_delta);
 }
 
-#else	/* CONFIG_SCHED_HMP */
+#else	/* CONFIG_SCHED_WALT */
 
 static inline void
 inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
@@ -51,7 +51,7 @@
 static inline void
 dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
 
-#endif	/* CONFIG_SCHED_HMP */
+#endif	/* CONFIG_SCHED_WALT */
 
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
@@ -172,7 +172,7 @@
 	.prio_changed		= prio_changed_stop,
 	.switched_to		= switched_to_stop,
 	.update_curr		= update_curr_stop,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_stop,
 #endif
 };
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 5e5811c..bae3b2b 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -122,7 +122,7 @@
 	/* Boost value for tasks on that SchedTune CGroup */
 	int boost;
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	/* Toggle ability to override sched boost enabled */
 	bool sched_boost_no_override;
 
@@ -147,7 +147,7 @@
 
 	/* Controls whether further updates are allowed to the colocate flag */
 	bool colocate_update_disabled;
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
 
 	/* Performance Boost (B) region threshold params */
 	int perf_boost_idx;
@@ -187,7 +187,7 @@
 static struct schedtune
 root_schedtune = {
 	.boost	= 0,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	.sched_boost_no_override = false,
 	.sched_boost_enabled = true,
 	.sched_boost_enabled_backup = true,
@@ -274,7 +274,7 @@
 /* Boost groups affecting each CPU in the system */
 DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 static inline void init_sched_boost(struct schedtune *st)
 {
 	st->sched_boost_no_override = false;
@@ -343,7 +343,7 @@
 	return 0;
 }
 
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
 
 static void
 schedtune_cpu_update(int cpu)
@@ -548,7 +548,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
 			struct cftype *cft)
 {
@@ -589,11 +589,11 @@
 	return 0;
 }
 
-#else /* CONFIG_SCHED_HMP */
+#else /* CONFIG_SCHED_WALT */
 
 static inline void init_sched_boost(struct schedtune *st) { }
 
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
 
 void schedtune_cancel_attach(struct cgroup_taskset *tset)
 {
@@ -729,7 +729,7 @@
 	return st->boost;
 }
 
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 static void schedtune_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -786,7 +786,7 @@
 }
 
 static struct cftype files[] = {
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 	{
 		.name = "sched_boost_no_override",
 		.read_u64 = sched_boost_override_read,
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 1b4bb23..58854b0 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,38 +21,158 @@
 
 #include <linux/syscore_ops.h>
 #include <linux/cpufreq.h>
+#include <linux/list_sort.h>
+#include <linux/jiffies.h>
+#include <linux/sched/core_ctl.h>
 #include <trace/events/sched.h>
 #include "sched.h"
 #include "walt.h"
 
-#define WINDOW_STATS_RECENT		0
-#define WINDOW_STATS_MAX		1
-#define WINDOW_STATS_MAX_RECENT_AVG	2
-#define WINDOW_STATS_AVG		3
-#define WINDOW_STATS_INVALID_POLICY	4
+#include <trace/events/sched.h>
 
-#define EXITING_TASK_MARKER	0xdeaddead
+const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
+				  "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
+				"IRQ_UPDATE"};
 
-static __read_mostly unsigned int walt_ravg_hist_size = 5;
-static __read_mostly unsigned int walt_window_stats_policy =
-	WINDOW_STATS_MAX_RECENT_AVG;
-static __read_mostly unsigned int walt_account_wait_time = 1;
-static __read_mostly unsigned int walt_freq_account_wait_time = 0;
-static __read_mostly unsigned int walt_io_is_busy = 0;
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
+					 "RQ_TO_RQ", "GROUP_TO_GROUP"};
 
-unsigned int sysctl_sched_walt_init_task_load_pct = 15;
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
+#define SCHED_ACCOUNT_WAIT_TIME 1
+
+#define EARLY_DETECTION_DURATION 9500000
+
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
+static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
+static bool use_cycle_counter;
+DEFINE_MUTEX(cluster_lock);
+
+u64 sched_ktime_clock(void)
+{
+	if (unlikely(sched_ktime_suspended))
+		return ktime_to_ns(ktime_last);
+	return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+	sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+	ktime_last = ktime_get();
+	sched_ktime_suspended = true;
+	return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+	.resume	= sched_resume,
+	.suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+	register_syscore_ops(&sched_syscore_ops);
+	return 0;
+}
+late_initcall(sched_init_ops);
+
+static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
+				     unsigned long *flags)
+{
+	int cpu;
+
+	local_irq_save(*flags);
+	for_each_cpu(cpu, cpus)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+}
+
+static void release_rq_locks_irqrestore(const cpumask_t *cpus,
+					unsigned long *flags)
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpus)
+		raw_spin_unlock(&cpu_rq(cpu)->lock);
+	local_irq_restore(*flags);
+}
+
+struct timer_list sched_grp_timer;
+static void sched_agg_grp_load(unsigned long data)
+{
+	struct sched_cluster *cluster;
+	unsigned long flags;
+	int cpu;
+
+	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+	for_each_sched_cluster(cluster) {
+		u64 aggr_grp_load = 0;
+
+		for_each_cpu(cpu, &cluster->cpus) {
+			struct rq *rq = cpu_rq(cpu);
+
+			if (rq->curr)
+				update_task_ravg(rq->curr, rq, TASK_UPDATE,
+						sched_ktime_clock(), 0);
+			aggr_grp_load +=
+				rq->grp_time.prev_runnable_sum;
+		}
+
+		cluster->aggr_grp_load = aggr_grp_load;
+	}
+
+	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+
+	if (sched_boost() == RESTRAINED_BOOST)
+		mod_timer(&sched_grp_timer, jiffies + 1);
+}
+
+static int __init setup_sched_grp_timer(void)
+{
+	init_timer_deferrable(&sched_grp_timer);
+	sched_grp_timer.function = sched_agg_grp_load;
+	return 0;
+}
+late_initcall(setup_sched_grp_timer);
 
 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
 unsigned int __read_mostly walt_disabled = 0;
 
-static unsigned int max_possible_efficiency = 1024;
-static unsigned int min_possible_efficiency = 1024;
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+
+/*
+ * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
+ * associated with them. This is required for atomic update of those variables
+ * when being modified via the sysctl interface.
+ *
+ * IMPORTANT: Initialize both copies to same value!!
+ */
+
+__read_mostly unsigned int sched_ravg_hist_size = 5;
+__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
+
+static __read_mostly unsigned int sched_io_is_busy = 1;
+
+__read_mostly unsigned int sched_window_stats_policy =
+	WINDOW_STATS_MAX_RECENT_AVG;
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
+	WINDOW_STATS_MAX_RECENT_AVG;
+
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
+
+/* Initial task load. Newly created tasks are assigned this load. */
+unsigned int __read_mostly sched_init_task_load_windows;
+unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
 
 /*
  * Maximum possible frequency across all cpus. Task demand and cpu
  * capacity (cpu_power) metrics are scaled in reference to it.
  */
-static unsigned int max_possible_freq = 1;
+unsigned int max_possible_freq = 1;
 
 /*
  * Minimum possible max_freq across all cpus. This will be same as
@@ -60,126 +180,126 @@
  * max_possible_freq on heterogenous systems. min_max_freq is used to derive
  * capacity (cpu_power) of cpus.
  */
-static unsigned int min_max_freq = 1;
+unsigned int min_max_freq = 1;
 
-static unsigned int max_capacity = 1024;
-static unsigned int min_capacity = 1024;
-static unsigned int max_load_scale_factor = 1024;
-static unsigned int max_possible_capacity = 1024;
+unsigned int max_capacity = 1024; /* max(rq->capacity) */
+unsigned int min_capacity = 1024; /* min(rq->capacity) */
+unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
+unsigned int
+min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
 
-/* Mask of all CPUs that have  max_possible_capacity */
-static cpumask_t mpc_mask = CPU_MASK_ALL;
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
 
-/* Window size (in ns) */
-__read_mostly unsigned int walt_ravg_window = 20000000;
+/*
+ * Task load is categorized into buckets for the purpose of top task tracking.
+ * The entire range of load from 0 to sched_ravg_window needs to be covered
+ * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
+ * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
+ * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
+ * sched_load_granule.
+ */
+__read_mostly unsigned int sched_load_granule =
+			MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
 
-/* Min window size (in ns) = 10ms */
-#define MIN_SCHED_RAVG_WINDOW 10000000
+/* Size of bitmaps maintained to track top tasks */
+static const unsigned int top_tasks_bitmap_size =
+		BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
 
-/* Max window size (in ns) = 1s */
-#define MAX_SCHED_RAVG_WINDOW 1000000000
+/*
+ * This governs what load needs to be used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
 
-static unsigned int sync_cpu;
-static ktime_t ktime_last;
-static __read_mostly bool walt_ktime_suspended;
-
-static unsigned int task_load(struct task_struct *p)
+static int __init set_sched_ravg_window(char *str)
 {
-	return p->ravg.demand;
-}
+	unsigned int window_size;
 
-void
-walt_inc_cumulative_runnable_avg(struct rq *rq,
-				 struct task_struct *p)
-{
-	rq->cumulative_runnable_avg += p->ravg.demand;
-}
+	get_option(&str, &window_size);
 
-void
-walt_dec_cumulative_runnable_avg(struct rq *rq,
-				 struct task_struct *p)
-{
-	rq->cumulative_runnable_avg -= p->ravg.demand;
-	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
-}
-
-static void
-fixup_cumulative_runnable_avg(struct rq *rq,
-			      struct task_struct *p, s64 task_load_delta)
-{
-	rq->cumulative_runnable_avg += task_load_delta;
-	if ((s64)rq->cumulative_runnable_avg < 0)
-		panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
-			task_load_delta, task_load(p));
-}
-
-u64 walt_ktime_clock(void)
-{
-	if (unlikely(walt_ktime_suspended))
-		return ktime_to_ns(ktime_last);
-	return ktime_get_ns();
-}
-
-static void walt_resume(void)
-{
-	walt_ktime_suspended = false;
-}
-
-static int walt_suspend(void)
-{
-	ktime_last = ktime_get();
-	walt_ktime_suspended = true;
-	return 0;
-}
-
-static struct syscore_ops walt_syscore_ops = {
-	.resume	= walt_resume,
-	.suspend = walt_suspend
-};
-
-static int __init walt_init_ops(void)
-{
-	register_syscore_ops(&walt_syscore_ops);
-	return 0;
-}
-late_initcall(walt_init_ops);
-
-void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
-		struct task_struct *p)
-{
-	cfs_rq->cumulative_runnable_avg += p->ravg.demand;
-}
-
-void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
-		struct task_struct *p)
-{
-	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
-}
-
-static int exiting_task(struct task_struct *p)
-{
-	if (p->flags & PF_EXITING) {
-		if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
-			p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-		}
-		return 1;
+	if (window_size < MIN_SCHED_RAVG_WINDOW ||
+			window_size > MAX_SCHED_RAVG_WINDOW) {
+		WARN_ON(1);
+		return -EINVAL;
 	}
+
+	sched_ravg_window = window_size;
 	return 0;
 }
 
-static int __init set_walt_ravg_window(char *str)
+early_param("sched_ravg_window", set_sched_ravg_window);
+
+void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 {
-	get_option(&str, &walt_ravg_window);
-
-	walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
-				walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
-	return 0;
+	inc_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
-early_param("walt_ravg_window", set_walt_ravg_window);
+void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+{
+	stats->nr_big_tasks = 0; /* never happens on EAS */
+	if (reset_cra) {
+		stats->cumulative_runnable_avg = 0;
+		stats->pred_demands_sum = 0;
+	}
+}
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ *	Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ *	One related thread group A that has tasks A0, A1, A2
+ *
+ *	A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ *	tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ *	CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ *	not belonging to group A are accumulated when they run on cpu X
+ *
+ * Let's say the stats for window M were as below:
+ *
+ *	C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ *		Task A0 ran 5ms on CPU0
+ *		Task B0 ran 1ms on CPU0
+ *
+ *	C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ *		Task A1 ran 4ms on CPU1
+ *		Task A2 ran 2ms on CPU1
+ *		Task B1 ran 5ms on CPU1
+ *
+ *	C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ *		CPU2 idle
+ *
+ *	C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ *		CPU3 idle
+ *
+ * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
+ * from all group A tasks is added to CPU1. IOW, at end of window M, cpu busy
+ * time reported to governor will be:
+ *
+ *	C0 busy time = 1ms
+ *	C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+__read_mostly int sched_freq_aggregate_threshold;
 
 static void
-update_window_start(struct rq *rq, u64 wallclock)
+update_window_start(struct rq *rq, u64 wallclock, int event)
 {
 	s64 delta;
 	int nr_windows;
@@ -191,42 +311,123 @@
 		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
 	}
 
-	if (delta < walt_ravg_window)
+	if (delta < sched_ravg_window)
 		return;
 
-	nr_windows = div64_u64(delta, walt_ravg_window);
-	rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
+	nr_windows = div64_u64(delta, sched_ravg_window);
+	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+
+	rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
+	if (event == PUT_PREV_TASK)
+		rq->cum_window_demand += rq->curr->ravg.demand;
 }
 
-static u64 scale_exec_time(u64 delta, struct rq *rq)
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
 {
-	unsigned int cur_freq = rq->cur_freq;
-	int sf;
+	mutex_lock(&cluster_lock);
+	if (!cb->get_cpu_cycle_counter) {
+		mutex_unlock(&cluster_lock);
+		return -EINVAL;
+	}
 
-	if (unlikely(cur_freq > max_possible_freq))
-		cur_freq = rq->max_possible_freq;
+	cpu_cycle_counter_cb = *cb;
+	use_cycle_counter = true;
+	mutex_unlock(&cluster_lock);
 
-	/* round up div64 */
-	delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
-			  max_possible_freq);
-
-	sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
-
-	delta *= sf;
-	delta >>= 10;
-
-	return delta;
+	return 0;
 }
 
-static int cpu_is_waiting_on_io(struct rq *rq)
+static void update_task_cpu_cycles(struct task_struct *p, int cpu)
 {
-	if (!walt_io_is_busy)
+	if (use_cycle_counter)
+		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+}
+
+void clear_ed_task(struct task_struct *p, struct rq *rq)
+{
+	if (p == rq->ed_task)
+		rq->ed_task = NULL;
+}
+
+bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	struct task_struct *p;
+	int loop_max = 10;
+
+	if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
 		return 0;
 
-	return atomic_read(&rq->nr_iowait);
+	rq->ed_task = NULL;
+	list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+		if (!loop_max)
+			break;
+
+		if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+			rq->ed_task = p;
+			return 1;
+		}
+
+		loop_max--;
+	}
+
+	return 0;
 }
 
-void walt_account_irqtime(int cpu, struct task_struct *curr,
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	if (is_idle_task(curr)) {
+		/* We're here without rq->lock held, IRQ disabled */
+		raw_spin_lock(&rq->lock);
+		update_task_cpu_cycles(curr, cpu);
+		raw_spin_unlock(&rq->lock);
+	}
+}
+
+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * nr_running for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+		return rq->hmp_stats.nr_big_tasks;
+
+	return rq->nr_running;
+}
+
+void clear_hmp_request(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	clear_boost_kick(cpu);
+	clear_reserved(cpu);
+	if (rq->push_task) {
+		struct task_struct *push_task = NULL;
+
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (rq->push_task) {
+			clear_reserved(rq->push_cpu);
+			push_task = rq->push_task;
+			rq->push_task = NULL;
+		}
+		rq->active_balance = 0;
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		if (push_task)
+			put_task_struct(push_task);
+	}
+}
+
+void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -243,7 +444,7 @@
 	cur_jiffies_ts = get_jiffies_64();
 
 	if (is_idle_task(curr))
-		walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
+		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
 				 delta);
 
 	nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -266,29 +467,824 @@
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+/*
+ * Special case the last index and provide a fast path for index = 0.
+ * Note that sched_load_granule can change underneath us if we are not
+ * holding any runqueue locks while calling the two functions below.
+ */
+static u32  top_task_load(struct rq *rq)
+{
+	int index = rq->prev_top;
+	u8 prev = 1 - rq->curr_table;
 
-#define WALT_HIGH_IRQ_TIMEOUT 3
+	if (!index) {
+		int msb = NUM_LOAD_INDICES - 1;
 
-u64 walt_irqload(int cpu) {
-	struct rq *rq = cpu_rq(cpu);
-	s64 delta;
-	delta = get_jiffies_64() - rq->irqload_ts;
-
-        /*
-	 * Current context can be preempted by irq and rq->irqload_ts can be
-	 * updated by irq context so that delta can be negative.
-	 * But this is okay and we can safely return as this means there
-	 * was recent irq occurrence.
-	 */
-
-        if (delta < WALT_HIGH_IRQ_TIMEOUT)
-		return rq->avg_irqload;
-        else
-		return 0;
+		if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
+			return 0;
+		else
+			return sched_load_granule;
+	} else if (index == NUM_LOAD_INDICES - 1) {
+		return sched_ravg_window;
+	} else {
+		return (index + 1) * sched_load_granule;
+	}
 }
 
-int walt_cpu_high_irqload(int cpu) {
-	return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
+u64 freq_policy_load(struct rq *rq)
+{
+	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+	struct sched_cluster *cluster = rq->cluster;
+	u64 aggr_grp_load = cluster->aggr_grp_load;
+	u64 load;
+
+	if (rq->ed_task != NULL)
+		return sched_ravg_window;
+
+	if (aggr_grp_load > sched_freq_aggregate_threshold)
+		load = rq->prev_runnable_sum + aggr_grp_load;
+	else
+		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+
+	switch (reporting_policy) {
+	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+		load = max_t(u64, load, top_task_load(rq));
+		break;
+	case FREQ_REPORT_TOP_TASK:
+		load = top_task_load(rq);
+		break;
+	case FREQ_REPORT_CPU_LOAD:
+		break;
+	default:
+		break;
+	}
+
+	return load;
+}
+
+/*
+ * In this function we match the accumulated subtractions with the current
+ * and previous windows we are operating with. Ignore any entries where
+ * the window start in the load_subtraction struct does not match either
+ * the current or the previous window. This could happen whenever CPUs
+ * become idle or busy with interrupts disabled for an extended period.
+ */
+static inline void account_load_subtractions(struct rq *rq)
+{
+	u64 ws = rq->window_start;
+	u64 prev_ws = ws - sched_ravg_window;
+	struct load_subtractions *ls = rq->load_subs;
+	int i;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		if (ls[i].window_start == ws) {
+			rq->curr_runnable_sum -= ls[i].subs;
+			rq->nt_curr_runnable_sum -= ls[i].new_subs;
+		} else if (ls[i].window_start == prev_ws) {
+			rq->prev_runnable_sum -= ls[i].subs;
+			rq->nt_prev_runnable_sum -= ls[i].new_subs;
+		}
+
+		ls[i].subs = 0;
+		ls[i].new_subs = 0;
+	}
+
+	BUG_ON((s64)rq->prev_runnable_sum < 0);
+	BUG_ON((s64)rq->curr_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
+}
+
+static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
+{
+	rq->load_subs[index].window_start = ws;
+	rq->load_subs[index].subs = 0;
+	rq->load_subs[index].new_subs = 0;
+}
+
+static int get_top_index(unsigned long *bitmap, unsigned long old_top)
+{
+	int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
+
+	if (index == NUM_LOAD_INDICES)
+		return 0;
+
+	return NUM_LOAD_INDICES - 1 - index;
+}
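The top-tasks bitmaps store load index 0 at bit NUM_LOAD_INDICES - 1, so a forward bit search returns the highest populated index first and get_top_index() only undoes that reversal. A small userspace sketch of the same convention (NUM_LOAD_INDICES shrunk to 16 purely for illustration, not kernel code):

#include <stdio.h>

#define NUM_LOAD_INDICES 16	/* illustrative only */

/* Mirror of __set_bit(NUM_LOAD_INDICES - index - 1, bitmap) */
static void set_index(unsigned int *bitmap, int index)
{
	*bitmap |= 1u << (NUM_LOAD_INDICES - index - 1);
}

/* Equivalent idea to get_top_index(): the first set bit from the LSB side
 * maps back to the highest populated load index. */
static int top_index(unsigned int bitmap)
{
	int bit;

	for (bit = 0; bit < NUM_LOAD_INDICES; bit++)
		if (bitmap & (1u << bit))
			return NUM_LOAD_INDICES - 1 - bit;
	return 0;
}

int main(void)
{
	unsigned int bitmap = 0;

	set_index(&bitmap, 3);
	set_index(&bitmap, 9);
	printf("top index = %d\n", top_index(bitmap));	/* prints 9 */
	return 0;
}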
+
+static int get_subtraction_index(struct rq *rq, u64 ws)
+{
+	int i;
+	u64 oldest = ULLONG_MAX;
+	int oldest_index = 0;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		u64 entry_ws = rq->load_subs[i].window_start;
+
+		if (ws == entry_ws)
+			return i;
+
+		if (entry_ws < oldest) {
+			oldest = entry_ws;
+			oldest_index = i;
+		}
+	}
+
+	create_subtraction_entry(rq, ws, oldest_index);
+	return oldest_index;
+}
+
+static void update_rq_load_subtractions(int index, struct rq *rq,
+					u32 sub_load, bool new_task)
+{
+	rq->load_subs[index].subs +=  sub_load;
+	if (new_task)
+		rq->load_subs[index].new_subs += sub_load;
+}
+
+void update_cluster_load_subtractions(struct task_struct *p,
+					int cpu, u64 ws, bool new_task)
+{
+	struct sched_cluster *cluster = cpu_cluster(cpu);
+	struct cpumask cluster_cpus = cluster->cpus;
+	u64 prev_ws = ws - sched_ravg_window;
+	int i;
+
+	cpumask_clear_cpu(cpu, &cluster_cpus);
+	raw_spin_lock(&cluster->load_lock);
+
+	for_each_cpu(i, &cluster_cpus) {
+		struct rq *rq = cpu_rq(i);
+		int index;
+
+		if (p->ravg.curr_window_cpu[i]) {
+			index = get_subtraction_index(rq, ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.curr_window_cpu[i], new_task);
+			p->ravg.curr_window_cpu[i] = 0;
+		}
+
+		if (p->ravg.prev_window_cpu[i]) {
+			index = get_subtraction_index(rq, prev_ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.prev_window_cpu[i], new_task);
+			p->ravg.prev_window_cpu[i] = 0;
+		}
+	}
+
+	raw_spin_unlock(&cluster->load_lock);
+}
+
+#ifdef CONFIG_SCHED_HMP
+static inline void
+init_new_task_load_hmp(struct task_struct *p, bool idle_task)
+{
+	p->ravg.curr_burst = 0;
+	/*
+	 * Initialize the avg_burst to twice the threshold, so that
+	 * a task would not be classified as short burst right away
+	 * after fork. It takes at least 6 sleep-wakeup cycles for
+	 * the avg_burst to go below the threshold.
+	 */
+	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+	p->ravg.avg_sleep_time = 0;
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
+{
+	/*
+	 * update_task_demand() has checks for the idle task and
+	 * exiting tasks. The runtime may include the wait time,
+	 * so update the burst only for the cases where the
+	 * task is running.
+	 */
+	if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
+				rq->curr == p))
+		p->ravg.curr_burst += runtime;
+}
+
+static void reset_task_stats_hmp(struct task_struct *p)
+{
+	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+}
+#else
+static inline void
+init_new_task_load_hmp(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
+{
+}
+
+static void reset_task_stats_hmp(struct task_struct *p)
+{
+}
+#endif
+
+static inline void inter_cluster_migration_fixup
+	(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
+{
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	struct rq *src_rq = cpu_rq(task_cpu);
+
+	if (same_freq_domain(new_cpu, task_cpu))
+		return;
+
+	p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
+	p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
+
+	dest_rq->curr_runnable_sum += p->ravg.curr_window;
+	dest_rq->prev_runnable_sum += p->ravg.prev_window;
+
+	src_rq->curr_runnable_sum -=  p->ravg.curr_window_cpu[task_cpu];
+	src_rq->prev_runnable_sum -=  p->ravg.prev_window_cpu[task_cpu];
+
+	if (new_task) {
+		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+
+		src_rq->nt_curr_runnable_sum -=
+				p->ravg.curr_window_cpu[task_cpu];
+		src_rq->nt_prev_runnable_sum -=
+				p->ravg.prev_window_cpu[task_cpu];
+	}
+
+	p->ravg.curr_window_cpu[task_cpu] = 0;
+	p->ravg.prev_window_cpu[task_cpu] = 0;
+
+	update_cluster_load_subtractions(p, task_cpu,
+			src_rq->window_start, new_task);
+
+	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
+}
+
+static int load_to_index(u32 load)
+{
+	if (load < sched_load_granule)
+		return 0;
+	else if (load >= sched_ravg_window)
+		return NUM_LOAD_INDICES - 1;
+	else
+		return load / sched_load_granule;
+}
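load_to_index() quantises per-window busy time into NUM_LOAD_INDICES slots of one granule each. The sketch below assumes sched_load_granule is the window size divided by NUM_LOAD_INDICES; the concrete numbers (20ms window, 1000 indices) are illustrative only:

#include <stdio.h>

#define NUM_LOAD_INDICES	1000			/* assumed for illustration */
static const unsigned int sched_ravg_window = 20000000;	/* 20ms in ns, assumed */
static const unsigned int sched_load_granule = 20000000 / NUM_LOAD_INDICES;

static int load_to_index(unsigned int load)
{
	if (load < sched_load_granule)
		return 0;
	else if (load >= sched_ravg_window)
		return NUM_LOAD_INDICES - 1;
	else
		return load / sched_load_granule;
}

int main(void)
{
	/* 10us -> index 0, 5ms -> index 250, a full window saturates at 999 */
	printf("%d %d %d\n", load_to_index(10000),
			     load_to_index(5000000),
			     load_to_index(20000000));
	return 0;
}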
+
+static void
+migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
+{
+	int index;
+	int top_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	u8 src = src_rq->curr_table;
+	u8 dst = dst_rq->curr_table;
+	u8 *src_table;
+	u8 *dst_table;
+
+	if (curr_window) {
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(curr_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->curr_top)
+			dst_rq->curr_top = index;
+
+		top_index = src_rq->curr_top;
+		if (index == top_index && !src_table[index])
+			src_rq->curr_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+
+	if (prev_window) {
+		src = 1 - src;
+		dst = 1 - dst;
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(prev_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->prev_top)
+			dst_rq->prev_top = index;
+
+		top_index = src_rq->prev_top;
+		if (index == top_index && !src_table[index])
+			src_rq->prev_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+}
+
+void fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+	struct rq *src_rq = task_rq(p);
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	u64 wallclock;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	bool new_task;
+	struct related_thread_group *grp;
+
+	if (!p->on_rq && p->state != TASK_WAKING)
+		return;
+
+	if (exiting_task(p)) {
+		clear_ed_task(p, src_rq);
+		return;
+	}
+
+	if (p->state == TASK_WAKING)
+		double_rq_lock(src_rq, dest_rq);
+
+	if (sched_disable_window_stats)
+		goto done;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(task_rq(p)->curr, task_rq(p),
+			 TASK_UPDATE,
+			 wallclock, 0);
+	update_task_ravg(dest_rq->curr, dest_rq,
+			 TASK_UPDATE, wallclock, 0);
+
+	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+			 wallclock, 0);
+
+	update_task_cpu_cycles(p, new_cpu);
+
+	if (__task_in_cum_window_demand(src_rq, p)) {
+		dec_cum_window_demand(src_rq, p);
+		inc_cum_window_demand(dest_rq, p, p->ravg.demand);
+	}
+
+	new_task = is_new_task(p);
+	/* Protected by rq_lock */
+	grp = p->grp;
+
+	/*
+	 * For frequency aggregation, we continue to do migration fixups
+	 * even for intra-cluster migrations. This is because the aggregated
+	 * load has to be reported on a single CPU regardless.
+	 */
+	if (grp) {
+		struct group_cpu_time *cpu_time;
+
+		cpu_time = &src_rq->grp_time;
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		cpu_time = &dest_rq->grp_time;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		if (p->ravg.curr_window) {
+			*src_curr_runnable_sum -= p->ravg.curr_window;
+			*dst_curr_runnable_sum += p->ravg.curr_window;
+			if (new_task) {
+				*src_nt_curr_runnable_sum -=
+							p->ravg.curr_window;
+				*dst_nt_curr_runnable_sum +=
+							p->ravg.curr_window;
+			}
+		}
+
+		if (p->ravg.prev_window) {
+			*src_prev_runnable_sum -= p->ravg.prev_window;
+			*dst_prev_runnable_sum += p->ravg.prev_window;
+			if (new_task) {
+				*src_nt_prev_runnable_sum -=
+							p->ravg.prev_window;
+				*dst_nt_prev_runnable_sum +=
+							p->ravg.prev_window;
+			}
+		}
+	} else {
+		inter_cluster_migration_fixup(p, new_cpu,
+						task_cpu(p), new_task);
+	}
+
+	migrate_top_tasks(p, src_rq, dest_rq);
+
+	if (!same_freq_domain(new_cpu, task_cpu(p))) {
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+	}
+
+	if (p == src_rq->ed_task) {
+		src_rq->ed_task = NULL;
+		if (!dest_rq->ed_task)
+			dest_rq->ed_task = p;
+	}
+
+done:
+	if (p->state == TASK_WAKING)
+		double_rq_unlock(src_rq, dest_rq);
+}
+
+void set_window_start(struct rq *rq)
+{
+	static int sync_cpu_available;
+
+	if (rq->window_start)
+		return;
+
+	if (!sync_cpu_available) {
+		rq->window_start = sched_ktime_clock();
+		sync_cpu_available = 1;
+	} else {
+		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
+
+		raw_spin_unlock(&rq->lock);
+		double_rq_lock(rq, sync_rq);
+		rq->window_start = sync_rq->window_start;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		raw_spin_unlock(&sync_rq->lock);
+	}
+
+	rq->curr->ravg.mark_start = rq->window_start;
+}
+
+unsigned int max_possible_efficiency = 1;
+unsigned int min_possible_efficiency = UINT_MAX;
+
+#define INC_STEP 8
+#define DEC_STEP 2
+#define CONSISTENT_THRES 16
+#define INC_STEP_BIG 16
+/*
+ * bucket_increase - update the count of all buckets
+ *
+ * @buckets: array of buckets tracking busy time of a task
+ * @idx: the index of bucket to be incremented
+ *
+ * Each time a complete window finishes, count of bucket that runtime
+ * falls in (@idx) is incremented. Counts of all other buckets are
+ * decayed. The rate of increase and decay could be different based
+ * on current count in the bucket.
+ */
+static inline void bucket_increase(u8 *buckets, int idx)
+{
+	int i, step;
+
+	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
+		if (idx != i) {
+			if (buckets[i] > DEC_STEP)
+				buckets[i] -= DEC_STEP;
+			else
+				buckets[i] = 0;
+		} else {
+			step = buckets[i] >= CONSISTENT_THRES ?
+						INC_STEP_BIG : INC_STEP;
+			if (buckets[i] > U8_MAX - step)
+				buckets[i] = U8_MAX;
+			else
+				buckets[i] += step;
+		}
+	}
+}
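To get a feel for the hysteresis these constants produce: the bucket that keeps being hit climbs by INC_STEP per window (INC_STEP_BIG once it passes CONSISTENT_THRES), while every other bucket decays by DEC_STEP. A minimal userspace simulation of the same stepping logic (not kernel code):

#include <stdio.h>

#define NUM_BUSY_BUCKETS 10
#define INC_STEP 8
#define DEC_STEP 2
#define CONSISTENT_THRES 16
#define INC_STEP_BIG 16
#define U8_MAX 255

static void bucket_increase(unsigned char *buckets, int idx)
{
	int i, step;

	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
		if (idx != i) {
			buckets[i] = buckets[i] > DEC_STEP ? buckets[i] - DEC_STEP : 0;
		} else {
			step = buckets[i] >= CONSISTENT_THRES ? INC_STEP_BIG : INC_STEP;
			buckets[i] = buckets[i] > U8_MAX - step ? U8_MAX : buckets[i] + step;
		}
	}
}

int main(void)
{
	unsigned char buckets[NUM_BUSY_BUCKETS] = { 0 };
	int w;

	/* Hit bucket 4 for five windows, then bucket 2 for five windows. */
	for (w = 0; w < 5; w++)
		bucket_increase(buckets, 4);
	for (w = 0; w < 5; w++)
		bucket_increase(buckets, 2);

	/* bucket[2]=64 (still climbing), bucket[4]=54 (decaying from 64) */
	printf("bucket[2]=%u bucket[4]=%u\n", buckets[2], buckets[4]);
	return 0;
}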
+
+static inline int busy_to_bucket(u32 normalized_rt)
+{
+	int bidx;
+
+	bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
+	bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
+
+	/*
+	 * Combine the lowest two buckets. The lowest frequency falls into
+	 * the 2nd bucket anyway, so continuing to predict the lowest
+	 * bucket is not useful.
+	 */
+	if (!bidx)
+		bidx++;
+
+	return bidx;
+}
+
+/*
+ * get_pred_busy - calculate predicted demand for a task on runqueue
+ *
+ * @rq: runqueue of task p
+ * @p: task whose prediction is being updated
+ * @start: starting bucket. returned prediction should not be lower than
+ *         this bucket.
+ * @runtime: runtime of the task. returned prediction should not be lower
+ *           than this runtime.
+ * Note: @start can be derived from @runtime. It's passed in only to
+ * avoid duplicated calculation in some cases.
+ *
+ * A new predicted busy time is returned for task @p based on @runtime
+ * passed in. The function searches through buckets that represent busy
+ * time equal to or bigger than @runtime and attempts to find the bucket
+ * to use for prediction. Once found, it searches through historical busy
+ * time and returns the latest that falls into the bucket. If no such busy
+ * time exists, it returns the middle of that bucket.
+ */
+static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+				int start, u32 runtime)
+{
+	int i;
+	u8 *buckets = p->ravg.busy_buckets;
+	u32 *hist = p->ravg.sum_history;
+	u32 dmin, dmax;
+	u64 cur_freq_runtime = 0;
+	int first = NUM_BUSY_BUCKETS, final;
+	u32 ret = runtime;
+
+	/* skip prediction for new tasks due to lack of history */
+	if (unlikely(is_new_task(p)))
+		goto out;
+
+	/* find minimal bucket index to pick */
+	for (i = start; i < NUM_BUSY_BUCKETS; i++) {
+		if (buckets[i]) {
+			first = i;
+			break;
+		}
+	}
+	/* if no higher buckets are filled, predict runtime */
+	if (first >= NUM_BUSY_BUCKETS)
+		goto out;
+
+	/* compute the bucket for prediction */
+	final = first;
+
+	/* determine demand range for the predicted bucket */
+	if (final < 2) {
+		/* lowest two buckets are combined */
+		dmin = 0;
+		final = 1;
+	} else {
+		dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
+	}
+	dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
+
+	/*
+	 * search through runtime history and return first runtime that falls
+	 * into the range of predicted bucket.
+	 */
+	for (i = 0; i < sched_ravg_hist_size; i++) {
+		if (hist[i] >= dmin && hist[i] < dmax) {
+			ret = hist[i];
+			break;
+		}
+	}
+	/* no historical runtime within the bucket found, use the bucket midpoint */
+	if (ret < dmin)
+		ret = (dmin + dmax) / 2;
+	/*
+	 * When updating in the middle of a window, runtime could be higher
+	 * than all recorded history. Always predict at least runtime.
+	 */
+	ret = max(runtime, ret);
+out:
+	trace_sched_update_pred_demand(rq, p, runtime,
+		mult_frac((unsigned int)cur_freq_runtime, 100,
+			  sched_ravg_window), ret);
+	return ret;
+}
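The bucket chosen for prediction maps back to a demand range [dmin, dmax) one NUM_BUSY_BUCKETS-th of max_task_load() wide, with buckets 0 and 1 merged. A worked example of that mapping, using assumed values for max_task_load() and NUM_BUSY_BUCKETS (not the kernel's actual numbers):

#include <stdio.h>

#define NUM_BUSY_BUCKETS 10
static const unsigned int max_task_load = 10000;	/* 10ms in us, assumed */

int main(void)
{
	unsigned int runtime = 3700;	/* busy time in this window, us */
	int bidx = runtime * NUM_BUSY_BUCKETS / max_task_load;	/* as mult_frac() */
	unsigned int dmin, dmax;

	if (bidx > NUM_BUSY_BUCKETS - 1)
		bidx = NUM_BUSY_BUCKETS - 1;
	if (!bidx)	/* lowest two buckets are combined */
		bidx = 1;

	dmin = bidx < 2 ? 0 : bidx * max_task_load / NUM_BUSY_BUCKETS;
	dmax = (bidx + 1) * max_task_load / NUM_BUSY_BUCKETS;

	/* runtime 3700us -> bucket 3, prediction drawn from [3000, 4000) */
	printf("bucket %d, range [%u, %u)\n", bidx, dmin, dmax);
	return 0;
}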
+
+static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+{
+	if (p->ravg.pred_demand >= p->ravg.curr_window)
+		return p->ravg.pred_demand;
+
+	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+			     p->ravg.curr_window);
+}
+
+/*
+ * The predictive demand of a task is calculated at window roll-over.
+ * If the task's current window busy time exceeds the predicted
+ * demand, update it here to reflect the task's needs.
+ */
+void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+	u32 new, old;
+
+	if (is_idle_task(p) || exiting_task(p))
+		return;
+
+	if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
+			(!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
+			 (event != TASK_MIGRATE &&
+			 event != PICK_NEXT_TASK)))
+		return;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it's moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
+			return;
+	}
+
+	new = calc_pred_demand(rq, p);
+	old = p->ravg.pred_demand;
+
+	if (old >= new)
+		return;
+
+	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+				!p->dl.dl_throttled))
+		p->sched_class->fixup_hmp_sched_stats(rq, p,
+				p->ravg.demand,
+				new);
+
+	p->ravg.pred_demand = new;
+}
+
+void clear_top_tasks_bitmap(unsigned long *bitmap)
+{
+	memset(bitmap, 0, top_tasks_bitmap_size);
+	__set_bit(NUM_LOAD_INDICES, bitmap);
+}
+
+static void update_top_tasks(struct task_struct *p, struct rq *rq,
+		u32 old_curr_window, int new_window, bool full_window)
+{
+	u8 curr = rq->curr_table;
+	u8 prev = 1 - curr;
+	u8 *curr_table = rq->top_tasks[curr];
+	u8 *prev_table = rq->top_tasks[prev];
+	int old_index, new_index, update_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	bool zero_index_update;
+
+	if (old_curr_window == curr_window && !new_window)
+		return;
+
+	old_index = load_to_index(old_curr_window);
+	new_index = load_to_index(curr_window);
+
+	if (!new_window) {
+		zero_index_update = !old_curr_window && curr_window;
+		if (old_index != new_index || zero_index_update) {
+			if (old_curr_window)
+				curr_table[old_index] -= 1;
+			if (curr_window)
+				curr_table[new_index] += 1;
+			if (new_index > rq->curr_top)
+				rq->curr_top = new_index;
+		}
+
+		if (!curr_table[old_index])
+			__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		return;
+	}
+
+	/*
+	 * The window has rolled over for this task. By the time we get
+	 * here, curr/prev swaps would have already occurred. So we need
+	 * to use prev_window for the new index.
+	 */
+	update_index = load_to_index(prev_window);
+
+	if (full_window) {
+		/*
+		 * Two cases here. Either 'p' ran for the entire window or
+		 * it didn't run at all. In either case there is no entry
+		 * in the prev table. If 'p' ran the entire window, we just
+		 * need to create a new entry in the prev table. In this case
+		 * update_index will correspond to sched_ravg_window,
+		 * so we can unconditionally update the top index.
+		 */
+		if (prev_window) {
+			prev_table[update_index] += 1;
+			rq->prev_top = update_index;
+		}
+
+		if (prev_table[update_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - update_index - 1,
+				rq->top_tasks_bitmap[prev]);
+	} else {
+		zero_index_update = !old_curr_window && prev_window;
+		if (old_index != update_index || zero_index_update) {
+			if (old_curr_window)
+				prev_table[old_index] -= 1;
+
+			prev_table[update_index] += 1;
+
+			if (update_index > rq->prev_top)
+				rq->prev_top = update_index;
+
+			if (!prev_table[old_index])
+				__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+						rq->top_tasks_bitmap[prev]);
+
+			if (prev_table[update_index] == 1)
+				__set_bit(NUM_LOAD_INDICES - update_index - 1,
+						rq->top_tasks_bitmap[prev]);
+		}
+	}
+
+	if (curr_window) {
+		curr_table[new_index] += 1;
+
+		if (new_index > rq->curr_top)
+			rq->curr_top = new_index;
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+	}
+}
+
+static void rollover_top_tasks(struct rq *rq, bool full_window)
+{
+	u8 curr_table = rq->curr_table;
+	u8 prev_table = 1 - curr_table;
+	int curr_top = rq->curr_top;
+
+	clear_top_tasks_table(rq->top_tasks[prev_table]);
+	clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
+
+	if (full_window) {
+		curr_top = 0;
+		clear_top_tasks_table(rq->top_tasks[curr_table]);
+		clear_top_tasks_bitmap(
+				rq->top_tasks_bitmap[curr_table]);
+	}
+
+	rq->curr_table = prev_table;
+	rq->prev_top = curr_top;
+	rq->curr_top = 0;
+}
+
+static u32 empty_windows[NR_CPUS];
+
+static void rollover_task_window(struct task_struct *p, bool full_window)
+{
+	u32 *curr_cpu_windows = empty_windows;
+	u32 curr_window;
+	int i;
+
+	/* Rollover the sum */
+	curr_window = 0;
+
+	if (!full_window) {
+		curr_window = p->ravg.curr_window;
+		curr_cpu_windows = p->ravg.curr_window_cpu;
+	}
+
+	p->ravg.prev_window = curr_window;
+	p->ravg.curr_window = 0;
+
+	/* Roll over individual CPU contributions */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
+		p->ravg.curr_window_cpu[i] = 0;
+	}
+}
+
+void sched_set_io_is_busy(int val)
+{
+	sched_io_is_busy = val;
+}
+
+static inline int cpu_is_waiting_on_io(struct rq *rq)
+{
+	if (!sched_io_is_busy)
+		return 0;
+
+	return atomic_read(&rq->nr_iowait);
 }
 
 static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
@@ -306,99 +1302,150 @@
 	if (event == TASK_WAKE)
 		return 0;
 
-	if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
-					 event == TASK_UPDATE)
+	if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
 		return 1;
 
-	/* Only TASK_MIGRATE && PICK_NEXT_TASK left */
-	return walt_freq_account_wait_time;
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it's moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+	}
+
+	/* TASK_MIGRATE, PICK_NEXT_TASK left */
+	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+}
+
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+	u32 freq;
+
+	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
+	delta *= rq->cluster->exec_scale_factor;
+	delta >>= 10;
+
+	return delta;
+}
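scale_exec_time() normalises raw wallclock runtime to an execution at max_possible_freq on the most efficient cluster: the delta is first scaled by the estimated-frequency ratio, then by the cluster's exec_scale_factor (in 1/1024 units). A small arithmetic sketch with assumed numbers:

#include <stdio.h>

int main(void)
{
	/* All values below are assumptions for illustration. */
	unsigned long long delta = 4000000;		/* 4ms of wallclock runtime, ns */
	unsigned long long freq = 1000000;		/* estimated current freq, kHz */
	unsigned long long max_possible_freq = 2000000;	/* kHz */
	unsigned long long exec_scale_factor = 768;	/* efficiency * 1024 / max efficiency */

	/* DIV64_U64_ROUNDUP(delta * freq, max_possible_freq) */
	delta = (delta * freq + max_possible_freq - 1) / max_possible_freq;
	delta = (delta * exec_scale_factor) >> 10;

	/* 4ms at half the max frequency on a 0.75-efficiency cluster -> 1.5ms */
	printf("scaled delta = %llu ns\n", delta);
	return 0;
}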
+
+static void rollover_cpu_window(struct rq *rq, bool full_window)
+{
+	u64 curr_sum = rq->curr_runnable_sum;
+	u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+	u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
+	u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
+
+	if (unlikely(full_window)) {
+		curr_sum = 0;
+		nt_curr_sum = 0;
+		grp_curr_sum = 0;
+		grp_nt_curr_sum = 0;
+	}
+
+	rq->prev_runnable_sum = curr_sum;
+	rq->nt_prev_runnable_sum = nt_curr_sum;
+	rq->grp_time.prev_runnable_sum = grp_curr_sum;
+	rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
+
+	rq->curr_runnable_sum = 0;
+	rq->nt_curr_runnable_sum = 0;
+	rq->grp_time.curr_runnable_sum = 0;
+	rq->grp_time.nt_curr_runnable_sum = 0;
 }
 
 /*
  * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
  */
 static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
-	     int event, u64 wallclock, u64 irqtime)
+				 int event, u64 wallclock, u64 irqtime)
 {
-	int new_window, nr_full_windows = 0;
+	int new_window, full_window = 0;
 	int p_is_curr_task = (p == rq->curr);
 	u64 mark_start = p->ravg.mark_start;
 	u64 window_start = rq->window_start;
-	u32 window_size = walt_ravg_window;
+	u32 window_size = sched_ravg_window;
 	u64 delta;
+	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+	u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+	u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+	bool new_task;
+	struct related_thread_group *grp;
+	int cpu = rq->cpu;
+	u32 old_curr_window = p->ravg.curr_window;
 
 	new_window = mark_start < window_start;
 	if (new_window) {
-		nr_full_windows = div64_u64((window_start - mark_start),
-						window_size);
+		full_window = (window_start - mark_start) >= window_size;
 		if (p->ravg.active_windows < USHRT_MAX)
 			p->ravg.active_windows++;
 	}
 
-	/* Handle per-task window rollover. We don't care about the idle
-	 * task or exiting tasks. */
-	if (new_window && !is_idle_task(p) && !exiting_task(p)) {
-		u32 curr_window = 0;
+	new_task = is_new_task(p);
 
-		if (!nr_full_windows)
-			curr_window = p->ravg.curr_window;
-
-		p->ravg.prev_window = curr_window;
-		p->ravg.curr_window = 0;
+	/*
+	 * Handle per-task window rollover. We don't care about the idle
+	 * task or exiting tasks.
+	 */
+	if (!is_idle_task(p) && !exiting_task(p)) {
+		if (new_window)
+			rollover_task_window(p, full_window);
 	}
 
-	if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
-		/* account_busy_for_cpu_time() = 0, so no update to the
-		 * task's current window needs to be made. This could be
-		 * for example
-		 *
-		 *   - a wakeup event on a task within the current
-		 *     window (!new_window below, no action required),
-		 *   - switching to a new task from idle (PICK_NEXT_TASK)
-		 *     in a new window where irqtime is 0 and we aren't
-		 *     waiting on IO */
+	if (p_is_curr_task && new_window) {
+		rollover_cpu_window(rq, full_window);
+		rollover_top_tasks(rq, full_window);
+	}
 
-		if (!new_window)
-			return;
+	if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+		goto done;
 
-		/* A new window has started. The RQ demand must be rolled
-		 * over if p is the current task. */
-		if (p_is_curr_task) {
-			u64 prev_sum = 0;
+	grp = p->grp;
+	if (grp) {
+		struct group_cpu_time *cpu_time = &rq->grp_time;
 
-			/* p is either idle task or an exiting task */
-			if (!nr_full_windows) {
-				prev_sum = rq->curr_runnable_sum;
-			}
+		curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		prev_runnable_sum = &cpu_time->prev_runnable_sum;
 
-			rq->prev_runnable_sum = prev_sum;
-			rq->curr_runnable_sum = 0;
-		}
-
-		return;
+		nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
 	}
 
 	if (!new_window) {
-		/* account_busy_for_cpu_time() = 1 so busy time needs
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
 		 * to be accounted to the current window. No rollover
 		 * since we didn't start a new window. An example of this is
 		 * when a task starts execution and then sleeps within the
-		 * same window. */
+		 * same window.
+		 */
 
 		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
 			delta = wallclock - mark_start;
 		else
 			delta = irqtime;
 		delta = scale_exec_time(delta, rq);
-		rq->curr_runnable_sum += delta;
-		if (!is_idle_task(p) && !exiting_task(p))
-			p->ravg.curr_window += delta;
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
 
-		return;
+		if (!is_idle_task(p) && !exiting_task(p)) {
+			p->ravg.curr_window += delta;
+			p->ravg.curr_window_cpu[cpu] += delta;
+		}
+
+		goto done;
 	}
 
 	if (!p_is_curr_task) {
-		/* account_busy_for_cpu_time() = 1 so busy time needs
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
 		 * to be accounted to the current window. A new window
 		 * has also started, but p is not the current task, so the
 		 * window is not rolled over - just split up and account
@@ -407,35 +1454,53 @@
 		 * task.
 		 *
 		 * Irqtime can't be accounted by a task that isn't the
-		 * currently running task. */
+		 * currently running task.
+		 */
 
-		if (!nr_full_windows) {
-			/* A full window hasn't elapsed, account partial
-			 * contribution to previous completed window. */
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
 			delta = scale_exec_time(window_start - mark_start, rq);
-			if (!exiting_task(p))
+			if (!exiting_task(p)) {
 				p->ravg.prev_window += delta;
+				p->ravg.prev_window_cpu[cpu] += delta;
+			}
 		} else {
-			/* Since at least one full window has elapsed,
+			/*
+			 * Since at least one full window has elapsed,
 			 * the contribution to the previous window is the
-			 * full window (window_size). */
+			 * full window (window_size).
+			 */
 			delta = scale_exec_time(window_size, rq);
-			if (!exiting_task(p))
+			if (!exiting_task(p)) {
 				p->ravg.prev_window = delta;
+				p->ravg.prev_window_cpu[cpu] = delta;
+			}
 		}
-		rq->prev_runnable_sum += delta;
+
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
 
 		/* Account piece of busy time in the current window. */
 		delta = scale_exec_time(wallclock - window_start, rq);
-		rq->curr_runnable_sum += delta;
-		if (!exiting_task(p))
-			p->ravg.curr_window = delta;
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
 
-		return;
+		if (!exiting_task(p)) {
+			p->ravg.curr_window = delta;
+			p->ravg.curr_window_cpu[cpu] = delta;
+		}
+
+		goto done;
 	}
 
 	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
-		/* account_busy_for_cpu_time() = 1 so busy time needs
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
 		 * to be accounted to the current window. A new window
 		 * has started and p is the current task so rollover is
 		 * needed. If any of these three above conditions are true
@@ -445,44 +1510,57 @@
 		 * be accounted.
 		 *
 		 * An example of this would be a task that starts execution
-		 * and then sleeps once a new window has begun. */
-
-		if (!nr_full_windows) {
-			/* A full window hasn't elapsed, account partial
-			 * contribution to previous completed window. */
-			delta = scale_exec_time(window_start - mark_start, rq);
-			if (!is_idle_task(p) && !exiting_task(p))
-				p->ravg.prev_window += delta;
-
-			delta += rq->curr_runnable_sum;
-		} else {
-			/* Since at least one full window has elapsed,
-			 * the contribution to the previous window is the
-			 * full window (window_size). */
-			delta = scale_exec_time(window_size, rq);
-			if (!is_idle_task(p) && !exiting_task(p))
-				p->ravg.prev_window = delta;
-
-		}
-		/*
-		 * Rollover for normal runnable sum is done here by overwriting
-		 * the values in prev_runnable_sum and curr_runnable_sum.
-		 * Rollover for new task runnable sum has completed by previous
-		 * if-else statement.
+		 * and then sleeps once a new window has begun.
 		 */
-		rq->prev_runnable_sum = delta;
+
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
+			delta = scale_exec_time(window_start - mark_start, rq);
+			if (!is_idle_task(p) && !exiting_task(p)) {
+				p->ravg.prev_window += delta;
+				p->ravg.prev_window_cpu[cpu] += delta;
+			}
+		} else {
+			/*
+			 * Since at least one full window has elapsed,
+			 * the contribution to the previous window is the
+			 * full window (window_size).
+			 */
+			delta = scale_exec_time(window_size, rq);
+			if (!is_idle_task(p) && !exiting_task(p)) {
+				p->ravg.prev_window = delta;
+				p->ravg.prev_window_cpu[cpu] = delta;
+			}
+		}
+
+		/*
+		 * The rollover of curr/prev runnable sums was already done by
+		 * rollover_cpu_window() above; only the delta for the previous
+		 * window needs to be accounted here.
+		 */
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
 
 		/* Account piece of busy time in the current window. */
 		delta = scale_exec_time(wallclock - window_start, rq);
-		rq->curr_runnable_sum = delta;
-		if (!is_idle_task(p) && !exiting_task(p))
-			p->ravg.curr_window = delta;
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
 
-		return;
+		if (!is_idle_task(p) && !exiting_task(p)) {
+			p->ravg.curr_window = delta;
+			p->ravg.curr_window_cpu[cpu] = delta;
+		}
+
+		goto done;
 	}
 
 	if (irqtime) {
-		/* account_busy_for_cpu_time() = 1 so busy time needs
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
 		 * to be accounted to the current window. A new window
 		 * has started and p is the current task so rollover is
 		 * needed. The current task must be the idle task because
@@ -490,26 +1568,30 @@
 		 *
 		 * Irqtime will be accounted each time we process IRQ activity
 		 * after a period of idleness, so we know the IRQ busy time
-		 * started at wallclock - irqtime. */
+		 * started at wallclock - irqtime.
+		 */
 
 		BUG_ON(!is_idle_task(p));
 		mark_start = wallclock - irqtime;
 
-		/* Roll window over. If IRQ busy time was just in the current
-		 * window then that is all that need be accounted. */
-		rq->prev_runnable_sum = rq->curr_runnable_sum;
+		/*
+		 * Roll window over. If IRQ busy time was just in the current
+		 * window then that is all that need be accounted.
+		 */
 		if (mark_start > window_start) {
-			rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
+			*curr_runnable_sum = scale_exec_time(irqtime, rq);
 			return;
 		}
 
-		/* The IRQ busy time spanned multiple windows. Process the
-		 * busy time preceding the current window start first. */
+		/*
+		 * The IRQ busy time spanned multiple windows. Process the
+		 * busy time preceding the current window start first.
+		 */
 		delta = window_start - mark_start;
 		if (delta > window_size)
 			delta = window_size;
 		delta = scale_exec_time(delta, rq);
-		rq->prev_runnable_sum += delta;
+		*prev_runnable_sum += delta;
 
 		/* Process the remaining IRQ busy time in the current window. */
 		delta = wallclock - window_start;
@@ -518,24 +1600,57 @@
 		return;
 	}
 
-	BUG();
+done:
+	if (!is_idle_task(p) && !exiting_task(p))
+		update_top_tasks(p, rq, old_curr_window,
+					new_window, full_window);
 }
 
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+			struct task_struct *p, u32 runtime) {
+
+	int bidx;
+	u32 pred_demand;
+
+	bidx = busy_to_bucket(runtime);
+	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	bucket_increase(p->ravg.busy_buckets, bidx);
+
+	return pred_demand;
+}
+
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
 {
-	/* No need to bother updating task demand for exiting tasks
-	 * or the idle task. */
+	/*
+	 * No need to bother updating task demand for exiting tasks
+	 * or the idle task.
+	 */
 	if (exiting_task(p) || is_idle_task(p))
 		return 0;
 
-	/* When a task is waking up it is completing a segment of non-busy
+	/*
+	 * When a task is waking up it is completing a segment of non-busy
 	 * time. Likewise, if wait time is not treated as busy time, then
 	 * when a task begins to run or is migrated, it is not running and
-	 * is completing a segment of non-busy time. */
-	if (event == TASK_WAKE || (!walt_account_wait_time &&
+	 * is completing a segment of non-busy time.
+	 */
+	if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
 			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it's moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+	}
+
 	return 1;
 }
 
@@ -550,15 +1665,18 @@
 {
 	u32 *hist = &p->ravg.sum_history[0];
 	int ridx, widx;
-	u32 max = 0, avg, demand;
+	u32 max = 0, avg, demand, pred_demand;
 	u64 sum = 0;
+	u64 prev_demand;
 
 	/* Ignore windows where task had no activity */
 	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
-			goto done;
+		goto done;
+
+	prev_demand = p->ravg.demand;
 
 	/* Push new 'runtime' value onto stack */
-	widx = walt_ravg_hist_size - 1;
+	widx = sched_ravg_hist_size - 1;
 	ridx = widx - samples;
 	for (; ridx >= 0; --widx, --ridx) {
 		hist[widx] = hist[ridx];
@@ -567,7 +1685,7 @@
 			max = hist[widx];
 	}
 
-	for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
+	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
 		hist[widx] = runtime;
 		sum += hist[widx];
 		if (hist[widx] > max)
@@ -576,17 +1694,18 @@
 
 	p->ravg.sum = 0;
 
-	if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
+	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
 		demand = runtime;
-	} else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
+	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
 		demand = max;
 	} else {
-		avg = div64_u64(sum, walt_ravg_hist_size);
-		if (walt_window_stats_policy == WINDOW_STATS_AVG)
+		avg = div64_u64(sum, sched_ravg_hist_size);
+		if (sched_window_stats_policy == WINDOW_STATS_AVG)
 			demand = avg;
 		else
 			demand = max(avg, runtime);
 	}
+	pred_demand = predict_and_update_buckets(rq, p, runtime);
 
 	/*
 	 * A throttled deadline sched class task gets dequeued without
@@ -595,22 +1714,27 @@
 	 */
 	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
 						!p->dl.dl_throttled))
-		fixup_cumulative_runnable_avg(rq, p, demand);
+		p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
+						      pred_demand);
 
 	p->ravg.demand = demand;
+	p->ravg.pred_demand = pred_demand;
+
+	if (__task_in_cum_window_demand(rq, p))
+		inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);
 
 done:
-	trace_walt_update_history(rq, p, runtime, samples, event);
-	return;
+	trace_sched_update_history(rq, p, runtime, samples, event);
 }
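Which of runtime/max/avg becomes the new demand is governed by sched_window_stats_policy, as the if/else chain above selects. The sketch below mocks the four policies over an assumed five-sample history (the enum values are local stand-ins, not the kernel's definitions):

#include <stdio.h>

enum { POLICY_RECENT, POLICY_MAX, POLICY_AVG, POLICY_MAX_RECENT_AVG };

/* hist[0] is the newest sample (the runtime just pushed); values assumed. */
static unsigned int demand_for_policy(int policy, const unsigned int *hist, int n)
{
	unsigned int max = 0, runtime = hist[0], avg;
	unsigned long long sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += hist[i];
		if (hist[i] > max)
			max = hist[i];
	}
	avg = sum / n;

	switch (policy) {
	case POLICY_RECENT:	return runtime;
	case POLICY_MAX:	return max;
	case POLICY_AVG:	return avg;
	default:		return avg > runtime ? avg : runtime;
	}
}

int main(void)
{
	unsigned int hist[5] = { 4, 9, 2, 3, 7 };	/* ms, newest first */
	int p;

	/* prints 4 (recent), 9 (max), 5 (avg), 5 (max of avg and recent) */
	for (p = POLICY_RECENT; p <= POLICY_MAX_RECENT_AVG; p++)
		printf("policy %d -> demand %u\n", p, demand_for_policy(p, hist, 5));
	return 0;
}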
 
-static void add_to_task_demand(struct rq *rq, struct task_struct *p,
-				u64 delta)
+static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
 {
 	delta = scale_exec_time(delta, rq);
 	p->ravg.sum += delta;
-	if (unlikely(p->ravg.sum > walt_ravg_window))
-		p->ravg.sum = walt_ravg_window;
+	if (unlikely(p->ravg.sum > sched_ravg_window))
+		p->ravg.sum = sched_ravg_window;
+
+	return delta;
 }
 
 /*
@@ -663,326 +1787,507 @@
  * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
  * depends on it!
  */
-static void update_task_demand(struct task_struct *p, struct rq *rq,
-	     int event, u64 wallclock)
+static u64 update_task_demand(struct task_struct *p, struct rq *rq,
+			       int event, u64 wallclock)
 {
 	u64 mark_start = p->ravg.mark_start;
 	u64 delta, window_start = rq->window_start;
 	int new_window, nr_full_windows;
-	u32 window_size = walt_ravg_window;
+	u32 window_size = sched_ravg_window;
+	u64 runtime;
 
 	new_window = mark_start < window_start;
-	if (!account_busy_for_task_demand(p, event)) {
+	if (!account_busy_for_task_demand(rq, p, event)) {
 		if (new_window)
-			/* If the time accounted isn't being accounted as
+			/*
+			 * If the time accounted isn't being accounted as
 			 * busy time, and a new window started, only the
 			 * previous window need be closed out with the
 			 * pre-existing demand. Multiple windows may have
 			 * elapsed, but since empty windows are dropped,
-			 * it is not necessary to account those. */
+			 * it is not necessary to account those.
+			 */
 			update_history(rq, p, p->ravg.sum, 1, event);
-		return;
+		return 0;
 	}
 
 	if (!new_window) {
-		/* The simple case - busy time contained within the existing
-		 * window. */
-		add_to_task_demand(rq, p, wallclock - mark_start);
-		return;
+		/*
+		 * The simple case - busy time contained within the existing
+		 * window.
+		 */
+		return add_to_task_demand(rq, p, wallclock - mark_start);
 	}
 
-	/* Busy time spans at least two windows. Temporarily rewind
-	 * window_start to first window boundary after mark_start. */
+	/*
+	 * Busy time spans at least two windows. Temporarily rewind
+	 * window_start to first window boundary after mark_start.
+	 */
 	delta = window_start - mark_start;
 	nr_full_windows = div64_u64(delta, window_size);
 	window_start -= (u64)nr_full_windows * (u64)window_size;
 
 	/* Process (window_start - mark_start) first */
-	add_to_task_demand(rq, p, window_start - mark_start);
+	runtime = add_to_task_demand(rq, p, window_start - mark_start);
 
 	/* Push new sample(s) into task's demand history */
 	update_history(rq, p, p->ravg.sum, 1, event);
-	if (nr_full_windows)
-		update_history(rq, p, scale_exec_time(window_size, rq),
-			       nr_full_windows, event);
+	if (nr_full_windows) {
+		u64 scaled_window = scale_exec_time(window_size, rq);
 
-	/* Roll window_start back to current to process any remainder
-	 * in current window. */
+		update_history(rq, p, scaled_window, nr_full_windows, event);
+		runtime += nr_full_windows * scaled_window;
+	}
+
+	/*
+	 * Roll window_start back to current to process any remainder
+	 * in current window.
+	 */
 	window_start += (u64)nr_full_windows * (u64)window_size;
 
 	/* Process (wallclock - window_start) next */
 	mark_start = window_start;
-	add_to_task_demand(rq, p, wallclock - mark_start);
+	runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+
+	return runtime;
+}
+
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+			  u64 wallclock, u64 irqtime)
+{
+	u64 cur_cycles;
+	int cpu = cpu_of(rq);
+
+	lockdep_assert_held(&rq->lock);
+
+	if (!use_cycle_counter) {
+		rq->cc.cycles = cpu_cur_freq(cpu);
+		rq->cc.time = 1;
+		return;
+	}
+
+	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * If the current task is the idle task and irqtime == 0, the CPU was
+	 * indeed idle and its cycle counter was probably not
+	 * increasing.  We still need an estimated CPU frequency
+	 * for IO wait time accounting.  Use the previously
+	 * calculated frequency in such a case.
+	 */
+	if (!is_idle_task(rq->curr) || irqtime) {
+		if (unlikely(cur_cycles < p->cpu_cycles))
+			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+		else
+			rq->cc.cycles = cur_cycles - p->cpu_cycles;
+		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+		if (event == IRQ_UPDATE && is_idle_task(p))
+			/*
+			 * The time between the idle task's mark_start and the
+			 * IRQ handler entry time is the CPU cycle counter
+			 * stall period. Upon IRQ handler entry,
+			 * sched_account_irqstart() replenishes the idle task's
+			 * cpu cycle counter, so rq->cc.cycles now represents
+			 * the cycles accrued during the IRQ handler rather
+			 * than the time between idle entry and IRQ exit.
+			 * Thus use irqtime as the time delta.
+			 */
+			rq->cc.time = irqtime;
+		else
+			rq->cc.time = wallclock - p->ravg.mark_start;
+		BUG_ON((s64)rq->cc.time < 0);
+	}
+
+	p->cpu_cycles = cur_cycles;
+
+	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
 }
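One subtlety above is the cycle-counter wrap: if the raw counter wrapped since the task's last snapshot, a plain subtraction would underflow, so the code adds U64_MAX instead. A tiny standalone check of that compensation, with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t prev = UINT64_MAX - 100;	/* task's last snapshot */
	uint64_t cur = 400;			/* counter value after wrapping */
	uint64_t cycles;

	if (cur < prev)
		cycles = cur + (UINT64_MAX - prev);	/* 400 + 100 = 500 */
	else
		cycles = cur - prev;

	printf("elapsed cycles = %llu\n", (unsigned long long)cycles);
	return 0;
}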
 
 /* Reflect task activity on its demand and cpu's busy time statistics */
-void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
-	     int event, u64 wallclock, u64 irqtime)
+void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime)
 {
-	if (walt_disabled || !rq->window_start)
+	u64 runtime;
+
+	if (!rq->window_start || sched_disable_window_stats ||
+	    p->ravg.mark_start == wallclock)
 		return;
 
 	lockdep_assert_held(&rq->lock);
 
-	update_window_start(rq, wallclock);
+	update_window_start(rq, wallclock, event);
 
-	if (!p->ravg.mark_start)
+	if (!p->ravg.mark_start) {
+		update_task_cpu_cycles(p, cpu_of(rq));
 		goto done;
+	}
 
-	update_task_demand(p, rq, event, wallclock);
+	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+	runtime = update_task_demand(p, rq, event, wallclock);
+	if (runtime)
+		update_task_burst(p, rq, event, runtime);
 	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
-
+	update_task_pred_demand(rq, p, event);
 done:
-	trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
+	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
+				rq->cc.cycles, rq->cc.time, &rq->grp_time);
+	trace_sched_update_task_ravg_mini(p, rq, event, wallclock, irqtime,
+				rq->cc.cycles, rq->cc.time, &rq->grp_time);
 
 	p->ravg.mark_start = wallclock;
 }
 
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
+u32 sched_get_init_task_load(struct task_struct *p)
 {
-	return SCHED_CAPACITY_SCALE;
+	return p->init_load_pct;
 }
 
-void walt_init_cpu_efficiency(void)
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
 {
-	int i, efficiency;
-	unsigned int max = 0, min = UINT_MAX;
+	if (init_load_pct < 0 || init_load_pct > 100)
+		return -EINVAL;
 
-	for_each_possible_cpu(i) {
-		efficiency = arch_get_cpu_efficiency(i);
-		cpu_rq(i)->efficiency = efficiency;
+	p->init_load_pct = init_load_pct;
 
-		if (efficiency > max)
-			max = efficiency;
-		if (efficiency < min)
-			min = efficiency;
-	}
-
-	if (max)
-		max_possible_efficiency = max;
-
-	if (min)
-		min_possible_efficiency = min;
+	return 0;
 }
 
-static void reset_task_stats(struct task_struct *p)
+void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+	int i;
+	u32 init_load_windows = sched_init_task_load_windows;
+	u32 init_load_pct = current->init_load_pct;
+
+	p->init_load_pct = 0;
+	rcu_assign_pointer(p->grp, NULL);
+	INIT_LIST_HEAD(&p->grp_list);
+	memset(&p->ravg, 0, sizeof(struct ravg));
+	p->cpu_cycles = 0;
+
+	init_new_task_load_hmp(p, idle_task);
+
+	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+
+	/* Don't have much choice. CPU frequency would be bogus */
+	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
+
+	if (idle_task)
+		return;
+
+	if (init_load_pct)
+		init_load_windows = div64_u64((u64)init_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	p->ravg.demand = init_load_windows;
+	p->ravg.pred_demand = 0;
+	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+		p->ravg.sum_history[i] = init_load_windows;
+	p->misfit = false;
+}
+
+void reset_task_stats(struct task_struct *p)
 {
 	u32 sum = 0;
+	u32 *curr_window_ptr = NULL;
+	u32 *prev_window_ptr = NULL;
 
-	if (exiting_task(p))
+	if (exiting_task(p)) {
 		sum = EXITING_TASK_MARKER;
+	} else {
+		curr_window_ptr =  p->ravg.curr_window_cpu;
+		prev_window_ptr = p->ravg.prev_window_cpu;
+		memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+		memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+	}
 
 	memset(&p->ravg, 0, sizeof(struct ravg));
+
+	p->ravg.curr_window_cpu = curr_window_ptr;
+	p->ravg.prev_window_cpu = prev_window_ptr;
+
+	reset_task_stats_hmp(p);
+
 	/* Retain EXITING_TASK marker */
 	p->ravg.sum_history[0] = sum;
 }
 
-void walt_mark_task_starting(struct task_struct *p)
+void mark_task_starting(struct task_struct *p)
 {
 	u64 wallclock;
 	struct rq *rq = task_rq(p);
 
-	if (!rq->window_start) {
+	if (!rq->window_start || sched_disable_window_stats) {
 		reset_task_stats(p);
 		return;
 	}
 
-	wallclock = walt_ktime_clock();
-	p->ravg.mark_start = wallclock;
+	wallclock = sched_ktime_clock();
+	p->ravg.mark_start = p->last_wake_ts = wallclock;
+	p->last_cpu_selected_ts = wallclock;
+	p->last_switch_out_ts = 0;
+	update_task_cpu_cycles(p, cpu_of(rq));
 }
 
-void walt_set_window_start(struct rq *rq)
+static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
+DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
+struct sched_cluster *sched_cluster[NR_CPUS];
+int num_clusters;
+
+struct list_head cluster_head;
+
+static void
+insert_cluster(struct sched_cluster *cluster, struct list_head *head)
 {
-	int cpu = cpu_of(rq);
-	struct rq *sync_rq = cpu_rq(sync_cpu);
+	struct sched_cluster *tmp;
+	struct list_head *iter = head;
 
-	if (rq->window_start)
-		return;
-
-	if (cpu == sync_cpu) {
-		rq->window_start = walt_ktime_clock();
-	} else {
-		raw_spin_unlock(&rq->lock);
-		double_rq_lock(rq, sync_rq);
-		rq->window_start = cpu_rq(sync_cpu)->window_start;
-		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
-		raw_spin_unlock(&sync_rq->lock);
+	list_for_each_entry(tmp, head, list) {
+		if (cluster->max_power_cost < tmp->max_power_cost)
+			break;
+		iter = &tmp->list;
 	}
 
-	rq->curr->ravg.mark_start = rq->window_start;
+	list_add(&cluster->list, iter);
 }
 
-void walt_migrate_sync_cpu(int cpu)
+static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
 {
-	if (cpu == sync_cpu)
-		sync_cpu = smp_processor_id();
+	struct sched_cluster *cluster = NULL;
+
+	cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
+	if (!cluster) {
+		__WARN_printf("Cluster allocation failed.  Possible bad scheduling\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&cluster->list);
+	cluster->max_power_cost		=	1;
+	cluster->min_power_cost		=	1;
+	cluster->capacity		=	1024;
+	cluster->max_possible_capacity	=	1024;
+	cluster->efficiency		=	1;
+	cluster->load_scale_factor	=	1024;
+	cluster->cur_freq		=	1;
+	cluster->max_freq		=	1;
+	cluster->max_mitigated_freq	=	UINT_MAX;
+	cluster->min_freq		=	1;
+	cluster->max_possible_freq	=	1;
+	cluster->dstate			=	0;
+	cluster->dstate_wakeup_energy	=	0;
+	cluster->dstate_wakeup_latency	=	0;
+	cluster->freq_init_done		=	false;
+
+	raw_spin_lock_init(&cluster->load_lock);
+	cluster->cpus = *cpus;
+	cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
+
+	if (cluster->efficiency > max_possible_efficiency)
+		max_possible_efficiency = cluster->efficiency;
+	if (cluster->efficiency < min_possible_efficiency)
+		min_possible_efficiency = cluster->efficiency;
+
+	cluster->notifier_sent = 0;
+	return cluster;
 }
 
-void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
+static void add_cluster(const struct cpumask *cpus, struct list_head *head)
 {
-	struct rq *src_rq = task_rq(p);
-	struct rq *dest_rq = cpu_rq(new_cpu);
-	u64 wallclock;
-
-	if (!p->on_rq && p->state != TASK_WAKING)
-		return;
-
-	if (exiting_task(p)) {
-		return;
-	}
-
-	if (p->state == TASK_WAKING)
-		double_rq_lock(src_rq, dest_rq);
-
-	wallclock = walt_ktime_clock();
-
-	walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
-			TASK_UPDATE, wallclock, 0);
-	walt_update_task_ravg(dest_rq->curr, dest_rq,
-			TASK_UPDATE, wallclock, 0);
-
-	walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
-
-	if (p->ravg.curr_window) {
-		src_rq->curr_runnable_sum -= p->ravg.curr_window;
-		dest_rq->curr_runnable_sum += p->ravg.curr_window;
-	}
-
-	if (p->ravg.prev_window) {
-		src_rq->prev_runnable_sum -= p->ravg.prev_window;
-		dest_rq->prev_runnable_sum += p->ravg.prev_window;
-	}
-
-	if ((s64)src_rq->prev_runnable_sum < 0) {
-		src_rq->prev_runnable_sum = 0;
-		WARN_ON(1);
-	}
-	if ((s64)src_rq->curr_runnable_sum < 0) {
-		src_rq->curr_runnable_sum = 0;
-		WARN_ON(1);
-	}
-
-	trace_walt_migration_update_sum(src_rq, p);
-	trace_walt_migration_update_sum(dest_rq, p);
-
-	if (p->state == TASK_WAKING)
-		double_rq_unlock(src_rq, dest_rq);
-}
-
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
-	int i;
-	int max = 0, min = INT_MAX;
-
-	for_each_online_cpu(i) {
-		if (cpu_rq(i)->capacity > max)
-			max = cpu_rq(i)->capacity;
-		if (cpu_rq(i)->capacity < min)
-			min = cpu_rq(i)->capacity;
-	}
-
-	max_capacity = max;
-	min_capacity = min;
-}
-
-static void update_min_max_capacity(void)
-{
-	unsigned long flags;
+	struct sched_cluster *cluster = alloc_new_cluster(cpus);
 	int i;
 
-	local_irq_save(flags);
-	for_each_possible_cpu(i)
-		raw_spin_lock(&cpu_rq(i)->lock);
+	if (!cluster)
+		return;
 
-	__update_min_max_capacity();
+	for_each_cpu(i, cpus)
+		cpu_rq(i)->cluster = cluster;
 
-	for_each_possible_cpu(i)
-		raw_spin_unlock(&cpu_rq(i)->lock);
-	local_irq_restore(flags);
+	insert_cluster(cluster, head);
+	set_bit(num_clusters, all_cluster_ids);
+	num_clusters++;
 }
 
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long capacity_scale_cpu_efficiency(int cpu)
-{
-	return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(int cpu)
-{
-	return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static unsigned long load_scale_cpu_efficiency(int cpu)
-{
-	return DIV_ROUND_UP(1024 * max_possible_efficiency,
-			    cpu_rq(cpu)->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static unsigned long load_scale_cpu_freq(int cpu)
-{
-	return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
-}
-
-static int compute_capacity(int cpu)
+static int compute_max_possible_capacity(struct sched_cluster *cluster)
 {
 	int capacity = 1024;
 
-	capacity *= capacity_scale_cpu_efficiency(cpu);
+	capacity *= capacity_scale_cpu_efficiency(cluster);
 	capacity >>= 10;
 
-	capacity *= capacity_scale_cpu_freq(cpu);
+	capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
 	capacity >>= 10;
 
 	return capacity;
 }
 
-static int compute_load_scale_factor(int cpu)
+static void update_min_max_capacity(void)
 {
-	int load_scale = 1024;
+	unsigned long flags;
+
+	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+	__update_min_max_capacity();
+	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+unsigned int max_power_cost = 1;
+
+static int
+compare_clusters(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct sched_cluster *cluster1, *cluster2;
+	int ret;
+
+	cluster1 = container_of(a, struct sched_cluster, list);
+	cluster2 = container_of(b, struct sched_cluster, list);
 
 	/*
-	 * load_scale_factor accounts for the fact that task load
-	 * is in reference to "best" performing cpu. Task's load will need to be
-	 * scaled (up) by a factor to determine suitability to be placed on a
-	 * (little) cpu.
+	 * Don't assume higher capacity means higher power. If the
+	 * power cost is the same, sort the higher capacity cluster before
+	 * the lower capacity cluster to start placing the tasks
+	 * on the higher capacity cluster.
 	 */
-	load_scale *= load_scale_cpu_efficiency(cpu);
-	load_scale >>= 10;
+	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
+		(cluster1->max_power_cost == cluster2->max_power_cost &&
+		cluster1->max_possible_capacity <
+				cluster2->max_possible_capacity);
 
-	load_scale *= load_scale_cpu_freq(cpu);
-	load_scale >>= 10;
-
-	return load_scale;
+	return ret;
 }
 
+void sort_clusters(void)
+{
+	struct sched_cluster *cluster;
+	struct list_head new_head;
+	unsigned int tmp_max = 1;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_sched_cluster(cluster) {
+		cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
+							       max_task_load());
+		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
+							       0);
+
+		if (cluster->max_power_cost > tmp_max)
+			tmp_max = cluster->max_power_cost;
+	}
+	max_power_cost = tmp_max;
+
+	move_list(&new_head, &cluster_head, true);
+
+	list_sort(NULL, &new_head, compare_clusters);
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+}
+
+static void update_all_clusters_stats(void)
+{
+	struct sched_cluster *cluster;
+	u64 highest_mpc = 0, lowest_mpc = U64_MAX;
+	unsigned long flags;
+
+	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+	for_each_sched_cluster(cluster) {
+		u64 mpc;
+
+		cluster->capacity = compute_capacity(cluster);
+		mpc = cluster->max_possible_capacity =
+			compute_max_possible_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		cluster->exec_scale_factor =
+			DIV_ROUND_UP(cluster->efficiency * 1024,
+				     max_possible_efficiency);
+
+		if (mpc > highest_mpc)
+			highest_mpc = mpc;
+
+		if (mpc < lowest_mpc)
+			lowest_mpc = mpc;
+	}
+
+	max_possible_capacity = highest_mpc;
+	min_max_possible_capacity = lowest_mpc;
+
+	__update_min_max_capacity();
+	sched_update_freq_max_load(cpu_possible_mask);
+	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+void update_cluster_topology(void)
+{
+	struct cpumask cpus = *cpu_possible_mask;
+	const struct cpumask *cluster_cpus;
+	struct list_head new_head;
+	int i;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_cpu(i, &cpus) {
+		cluster_cpus = cpu_coregroup_mask(i);
+		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+		cpumask_andnot(&cpus, &cpus, cluster_cpus);
+		add_cluster(cluster_cpus, &new_head);
+	}
+
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+	update_all_clusters_stats();
+}
+
+struct sched_cluster init_cluster = {
+	.list			=	LIST_HEAD_INIT(init_cluster.list),
+	.id			=	0,
+	.max_power_cost		=	1,
+	.min_power_cost		=	1,
+	.capacity		=	1024,
+	.max_possible_capacity	=	1024,
+	.efficiency		=	1,
+	.load_scale_factor	=	1024,
+	.cur_freq		=	1,
+	.max_freq		=	1,
+	.max_mitigated_freq	=	UINT_MAX,
+	.min_freq		=	1,
+	.max_possible_freq	=	1,
+	.dstate			=	0,
+	.dstate_wakeup_energy	=	0,
+	.dstate_wakeup_latency	=	0,
+	.exec_scale_factor	=	1024,
+	.notifier_sent		=	0,
+	.wake_up_idle		=	0,
+	.aggr_grp_load		=	0,
+};
+
+void init_clusters(void)
+{
+	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
+	init_cluster.cpus = *cpu_possible_mask;
+	raw_spin_lock_init(&init_cluster.load_lock);
+	INIT_LIST_HEAD(&cluster_head);
+}
+
+static unsigned long cpu_max_table_freq[NR_CPUS];
+
 static int cpufreq_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
 	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
-	int i, update_max = 0;
-	u64 highest_mpc = 0, highest_mplsf = 0;
-	const struct cpumask *cpus = policy->related_cpus;
-	unsigned int orig_min_max_freq = min_max_freq;
-	unsigned int orig_max_possible_freq = max_possible_freq;
-	/* Initialized to policy->max in case policy->related_cpus is empty! */
-	unsigned int orig_max_freq = policy->max;
+	struct sched_cluster *cluster = NULL;
+	struct cpumask policy_cluster = *policy->related_cpus;
+	unsigned int orig_max_freq = 0;
+	int i, j, update_capacity = 0;
 
 	if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
 						val != CPUFREQ_CREATE_POLICY)
@@ -993,16 +2298,6 @@
 		return 0;
 	}
 
-	for_each_cpu(i, policy->related_cpus) {
-		cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
-			     policy->related_cpus);
-		orig_max_freq = cpu_rq(i)->max_freq;
-		cpu_rq(i)->min_freq = policy->min;
-		cpu_rq(i)->max_freq = policy->max;
-		cpu_rq(i)->cur_freq = policy->cur;
-		cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
-	}
-
 	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
 	if (min_max_freq == 1)
 		min_max_freq = UINT_MAX;
@@ -1010,108 +2305,40 @@
 	BUG_ON(!min_max_freq);
 	BUG_ON(!policy->max);
 
-	/* Changes to policy other than max_freq don't require any updates */
-	if (orig_max_freq == policy->max)
-		return 0;
+	for_each_cpu(i, &policy_cluster)
+		cpu_max_table_freq[i] = policy->cpuinfo.max_freq;
 
-	/*
-	 * A changed min_max_freq or max_possible_freq (possible during bootup)
-	 * needs to trigger re-computation of load_scale_factor and capacity for
-	 * all possible cpus (even those offline). It also needs to trigger
-	 * re-computation of nr_big_task count on all online cpus.
-	 *
-	 * A changed rq->max_freq otoh needs to trigger re-computation of
-	 * load_scale_factor and capacity for just the cluster of cpus involved.
-	 * Since small task definition depends on max_load_scale_factor, a
-	 * changed load_scale_factor of one cluster could influence
-	 * classification of tasks in another cluster. Hence a changed
-	 * rq->max_freq will need to trigger re-computation of nr_big_task
-	 * count on all online cpus.
-	 *
-	 * While it should be sufficient for nr_big_tasks to be
-	 * re-computed for only online cpus, we have inadequate context
-	 * information here (in policy notifier) with regard to hotplug-safety
-	 * context in which notification is issued. As a result, we can't use
-	 * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
-	 * fixed up to issue notification always in hotplug-safe context,
-	 * re-compute nr_big_task for all possible cpus.
-	 */
+	for_each_cpu(i, &policy_cluster) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&policy_cluster, &policy_cluster,
+						&cluster->cpus);
 
-	if (orig_min_max_freq != min_max_freq ||
-		orig_max_possible_freq != max_possible_freq) {
-			cpus = cpu_possible_mask;
-			update_max = 1;
-	}
+		orig_max_freq = cluster->max_freq;
+		cluster->min_freq = policy->min;
+		cluster->max_freq = policy->max;
+		cluster->cur_freq = policy->cur;
 
-	/*
-	 * Changed load_scale_factor can trigger reclassification of tasks as
-	 * big or small. Make this change "atomic" so that tasks are accounted
-	 * properly due to changed load_scale_factor
-	 */
-	for_each_cpu(i, cpus) {
-		struct rq *rq = cpu_rq(i);
+		if (!cluster->freq_init_done) {
+			mutex_lock(&cluster_lock);
+			for_each_cpu(j, &cluster->cpus)
+				cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
+						policy->related_cpus);
+			cluster->max_possible_freq = policy->cpuinfo.max_freq;
+			cluster->max_possible_capacity =
+				compute_max_possible_capacity(cluster);
+			cluster->freq_init_done = true;
 
-		rq->capacity = compute_capacity(i);
-		rq->load_scale_factor = compute_load_scale_factor(i);
-
-		if (update_max) {
-			u64 mpc, mplsf;
-
-			mpc = div_u64(((u64) rq->capacity) *
-				rq->max_possible_freq, rq->max_freq);
-			rq->max_possible_capacity = (int) mpc;
-
-			mplsf = div_u64(((u64) rq->load_scale_factor) *
-				rq->max_possible_freq, rq->max_freq);
-
-			if (mpc > highest_mpc) {
-				highest_mpc = mpc;
-				cpumask_clear(&mpc_mask);
-				cpumask_set_cpu(i, &mpc_mask);
-			} else if (mpc == highest_mpc) {
-				cpumask_set_cpu(i, &mpc_mask);
-			}
-
-			if (mplsf > highest_mplsf)
-				highest_mplsf = mplsf;
+			sort_clusters();
+			update_all_clusters_stats();
+			mutex_unlock(&cluster_lock);
+			continue;
 		}
+
+		update_capacity += (orig_max_freq != cluster->max_freq);
 	}
 
-	if (update_max) {
-		max_possible_capacity = highest_mpc;
-		max_load_scale_factor = highest_mplsf;
-	}
-
-	__update_min_max_capacity();
-
-	return 0;
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
-		unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
-	unsigned int cpu = freq->cpu, new_freq = freq->new;
-	unsigned long flags;
-	int i;
-
-	if (val != CPUFREQ_POSTCHANGE)
-		return 0;
-
-	BUG_ON(!new_freq);
-
-	if (cpu_rq(cpu)->cur_freq == new_freq)
-		return 0;
-
-	for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
-		struct rq *rq = cpu_rq(i);
-
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
-				      walt_ktime_clock(), 0);
-		rq->cur_freq = new_freq;
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-	}
+	if (update_capacity)
+		update_cpu_cluster_capacity(policy->related_cpus);
 
 	return 0;
 }
@@ -1120,49 +2347,683 @@
 	.notifier_call = cpufreq_notifier_policy
 };
 
-static struct notifier_block notifier_trans_block = {
-	.notifier_call = cpufreq_notifier_trans
-};
-
-static int register_sched_callback(void)
+static int register_walt_callback(void)
 {
-	int ret;
-
-	ret = cpufreq_register_notifier(&notifier_policy_block,
-						CPUFREQ_POLICY_NOTIFIER);
-
-	if (!ret)
-		ret = cpufreq_register_notifier(&notifier_trans_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
-
-	return 0;
+	return cpufreq_register_notifier(&notifier_policy_block,
+					 CPUFREQ_POLICY_NOTIFIER);
 }
-
 /*
  * cpufreq callbacks can be registered at core_initcall or later time.
  * Any registration done prior to that is "forgotten" by cpufreq. See
  * initialization of variable init_cpufreq_transition_notifier_list_called
  * for further information.
  */
-core_initcall(register_sched_callback);
+core_initcall(register_walt_callback);
 
-void walt_init_new_task_load(struct task_struct *p)
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+				struct task_struct *p, int event);
+
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * Children inherit the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+/* Maximum allowed threshold before freq aggregation must be enabled */
+#define MAX_FREQ_AGGR_THRESH 1000
+
+struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
+static LIST_HEAD(active_related_thread_groups);
+DEFINE_RWLOCK(related_thread_group_lock);
+
+unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
+
+/*
+ * Task groups whose aggregate demand on a cpu is more than
+ * sched_group_upmigrate need to be up-migrated if possible.
+ */
+unsigned int __read_mostly sched_group_upmigrate = 20000000;
+unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100;
+
+/*
+ * Task groups, once up-migrated, will need to drop their aggregate
+ * demand to less than sched_group_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_group_downmigrate = 19000000;
+unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
+
+static int
+group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp,
+						u64 demand, bool group_boost)
 {
-	int i;
-	u32 init_load_windows =
-			div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
-                          (u64)walt_ravg_window, 100);
-	u32 init_load_pct = current->init_load_pct;
+	int cpu = cluster_first_cpu(cluster);
+	int prev_capacity = 0;
+	unsigned int threshold = sched_group_upmigrate;
+	u64 load;
 
-	p->init_load_pct = 0;
-	memset(&p->ravg, 0, sizeof(struct ravg));
+	if (cluster->capacity == max_capacity)
+		return 1;
 
-	if (init_load_pct) {
-		init_load_windows = div64_u64((u64)init_load_pct *
-			  (u64)walt_ravg_window, 100);
+	if (group_boost)
+		return 0;
+
+	if (!demand)
+		return 1;
+
+	if (grp->preferred_cluster)
+		prev_capacity = grp->preferred_cluster->capacity;
+
+	if (cluster->capacity < prev_capacity)
+		threshold = sched_group_downmigrate;
+
+	load = scale_load_to_cpu(demand, cpu);
+	if (load < threshold)
+		return 1;
+
+	return 0;
+}
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+/* Return cluster which can offer required capacity for group */
+static struct sched_cluster *best_cluster(struct related_thread_group *grp,
+					u64 total_demand, bool group_boost)
+{
+	struct sched_cluster *cluster = NULL;
+
+	for_each_sched_cluster(cluster) {
+		if (group_will_fit(cluster, grp, total_demand, group_boost))
+			return cluster;
 	}
 
-	p->ravg.demand = init_load_windows;
-	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
-		p->ravg.sum_history[i] = init_load_windows;
+	return sched_cluster[0];
+}
+
+int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	struct related_thread_group *grp;
+	int rc = 1;
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+	if (grp)
+		rc = (grp->preferred_cluster == cluster);
+
+	rcu_read_unlock();
+	return rc;
+}
+
+static void _set_preferred_cluster(struct related_thread_group *grp)
+{
+	struct task_struct *p;
+	u64 combined_demand = 0;
+	bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
+	bool group_boost = false;
+	u64 wallclock;
+
+	if (list_empty(&grp->tasks))
+		return;
+
+	wallclock = sched_ktime_clock();
+
+	/*
+	 * Wakeups of two or more related tasks could race with each other and
+	 * result in multiple calls to _set_preferred_cluster being issued
+	 * at the same time. Avoid the overhead of rechecking the preferred
+	 * cluster in such cases.
+	 */
+	if (wallclock - grp->last_update < sched_ravg_window / 10)
+		return;
+
+	list_for_each_entry(p, &grp->tasks, grp_list) {
+		if (boost_on_big && task_sched_boost(p)) {
+			group_boost = true;
+			break;
+		}
+
+		if (p->ravg.mark_start < wallclock -
+		    (sched_ravg_window * sched_ravg_hist_size))
+			continue;
+
+		combined_demand += p->ravg.demand;
+
+	}
+
+	grp->preferred_cluster = best_cluster(grp,
+			combined_demand, group_boost);
+	grp->last_update = sched_ktime_clock();
+	trace_sched_set_preferred_cluster(grp, combined_demand);
+}
+
+void set_preferred_cluster(struct related_thread_group *grp)
+{
+	raw_spin_lock(&grp->lock);
+	_set_preferred_cluster(grp);
+	raw_spin_unlock(&grp->lock);
+}
+
+int update_preferred_cluster(struct related_thread_group *grp,
+		struct task_struct *p, u32 old_load)
+{
+	u32 new_load = task_load(p);
+
+	if (!grp)
+		return 0;
+
+	/*
+	 * Update if the task's load has changed significantly or a complete
+	 * window has passed since we last updated the preference.
+	 */
+	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
+		sched_ktime_clock() - grp->last_update > sched_ravg_window)
+		return 1;
+
+	return 0;
+}
+
+DEFINE_MUTEX(policy_mutex);
+
+#define pct_to_real(tunable)	\
+		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
+unsigned int update_freq_aggregate_threshold(unsigned int threshold)
+{
+	unsigned int old_threshold;
+
+	mutex_lock(&policy_mutex);
+
+	old_threshold = sysctl_sched_freq_aggregate_threshold_pct;
+
+	sysctl_sched_freq_aggregate_threshold_pct = threshold;
+	sched_freq_aggregate_threshold =
+		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+
+	mutex_unlock(&policy_mutex);
+
+	return old_threshold;
+}
+
+#define ADD_TASK	0
+#define REM_TASK	1
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+
+static inline struct related_thread_group*
+lookup_related_thread_group(unsigned int group_id)
+{
+	return related_thread_groups[group_id];
+}
+
+int alloc_related_thread_groups(void)
+{
+	int i, ret;
+	struct related_thread_group *grp;
+
+	/* group_id = 0 is invalid as it's a special id used to remove a group. */
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
+		if (!grp) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		grp->id = i;
+		INIT_LIST_HEAD(&grp->tasks);
+		INIT_LIST_HEAD(&grp->list);
+		raw_spin_lock_init(&grp->lock);
+
+		related_thread_groups[i] = grp;
+	}
+
+	return 0;
+
+err:
+	for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+		grp = lookup_related_thread_group(i);
+		if (grp) {
+			kfree(grp);
+			related_thread_groups[i] = NULL;
+		} else {
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void remove_task_from_group(struct task_struct *p)
+{
+	struct related_thread_group *grp = p->grp;
+	struct rq *rq;
+	int empty_group = 1;
+	struct rq_flags rf;
+
+	raw_spin_lock(&grp->lock);
+
+	rq = __task_rq_lock(p, &rf);
+	transfer_busy_time(rq, p->grp, p, REM_TASK);
+	list_del_init(&p->grp_list);
+	rcu_assign_pointer(p->grp, NULL);
+	__task_rq_unlock(rq, &rf);
+
+
+	if (!list_empty(&grp->tasks)) {
+		empty_group = 0;
+		_set_preferred_cluster(grp);
+	}
+
+	raw_spin_unlock(&grp->lock);
+
+	/* Reserved groups cannot be destroyed */
+	if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
+		 /*
+		  * We test whether grp->list is attached with list_empty(),
+		  * hence re-init the list after deletion.
+		  */
+		list_del_init(&grp->list);
+}
+
+static int
+add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
+{
+	struct rq *rq;
+	struct rq_flags rf;
+
+	raw_spin_lock(&grp->lock);
+
+	/*
+	 * Change p->grp under rq->lock. Will prevent races with read-side
+	 * reference of p->grp in various hot-paths
+	 */
+	rq = __task_rq_lock(p, &rf);
+	transfer_busy_time(rq, grp, p, ADD_TASK);
+	list_add(&p->grp_list, &grp->tasks);
+	rcu_assign_pointer(p->grp, grp);
+	__task_rq_unlock(rq, &rf);
+
+	_set_preferred_cluster(grp);
+
+	raw_spin_unlock(&grp->lock);
+
+	return 0;
+}
+
+void add_new_task_to_grp(struct task_struct *new)
+{
+	unsigned long flags;
+	struct related_thread_group *grp;
+	struct task_struct *leader = new->group_leader;
+	unsigned int leader_grp_id = sched_get_group_id(leader);
+
+	if (!sysctl_sched_enable_thread_grouping &&
+	    leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
+		return;
+
+	if (thread_group_leader(new))
+		return;
+
+	if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
+		if (!same_schedtune(new, leader))
+			return;
+	}
+
+	write_lock_irqsave(&related_thread_group_lock, flags);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(leader);
+	rcu_read_unlock();
+
+	/*
+	 * It's possible that someone already added the new task to the
+	 * group. A leader's thread group is updated prior to calling
+	 * this function. It's also possible that the leader has exited
+	 * the group. In either case, there is nothing else to do.
+	 */
+	if (!grp || new->grp) {
+		write_unlock_irqrestore(&related_thread_group_lock, flags);
+		return;
+	}
+
+	raw_spin_lock(&grp->lock);
+
+	rcu_assign_pointer(new->grp, grp);
+	list_add(&new->grp_list, &grp->tasks);
+
+	raw_spin_unlock(&grp->lock);
+	write_unlock_irqrestore(&related_thread_group_lock, flags);
+}
+
+static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct related_thread_group *grp = NULL;
+
+	if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	write_lock(&related_thread_group_lock);
+
+	/* Switching from one group to another directly is not permitted */
+	if ((current != p && p->flags & PF_EXITING) ||
+			(!p->grp && !group_id) ||
+			(p->grp && group_id))
+		goto done;
+
+	if (!group_id) {
+		remove_task_from_group(p);
+		goto done;
+	}
+
+	grp = lookup_related_thread_group(group_id);
+	if (list_empty(&grp->list))
+		list_add(&grp->list, &active_related_thread_groups);
+
+	rc = add_task_to_group(p, grp);
+done:
+	write_unlock(&related_thread_group_lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+	return rc;
+}
+
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	/* DEFAULT_CGROUP_COLOC_ID is a reserved id */
+	if (group_id == DEFAULT_CGROUP_COLOC_ID)
+		return -EINVAL;
+
+	return __sched_set_group_id(p, group_id);
+}
+
+unsigned int sched_get_group_id(struct task_struct *p)
+{
+	unsigned int group_id;
+	struct related_thread_group *grp;
+
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	group_id = grp ? grp->id : 0;
+	rcu_read_unlock();
+
+	return group_id;
+}
+
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+/*
+ * We create a default colocation group at boot. There is no need to
+ * synchronize tasks between cgroups at creation time because the
+ * correct cgroup hierarchy is not available at boot. Therefore cgroup
+ * colocation is turned off by default even though the colocation group
+ * itself has been allocated. Furthermore, this colocation group cannot
+ * be destroyed once it has been created. All of this has been done as
+ * part of runtime optimizations.
+ *
+ * The job of synchronizing tasks to the colocation group is done when
+ * the colocation flag in the cgroup is turned on.
+ */
+static int __init create_default_coloc_group(void)
+{
+	struct related_thread_group *grp = NULL;
+	unsigned long flags;
+
+	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+	write_lock_irqsave(&related_thread_group_lock, flags);
+	list_add(&grp->list, &active_related_thread_groups);
+	write_unlock_irqrestore(&related_thread_group_lock, flags);
+
+	update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
+	return 0;
+}
+late_initcall(create_default_coloc_group);
+
+int sync_cgroup_colocation(struct task_struct *p, bool insert)
+{
+	unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
+
+	return __sched_set_group_id(p, grp_id);
+}
+#endif
+
+void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+	int i;
+	struct sched_cluster *cluster;
+	struct cpumask cpumask;
+	unsigned long flags;
+
+	cpumask_copy(&cpumask, cpus);
+	acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+		cluster->capacity = compute_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+	}
+
+	__update_min_max_capacity();
+
+	release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+static unsigned long max_cap[NR_CPUS];
+static unsigned long thermal_cap_cpu[NR_CPUS];
+
+unsigned long thermal_cap(int cpu)
+{
+	return thermal_cap_cpu[cpu] ?: cpu_rq(cpu)->cpu_capacity_orig;
+}
+
+unsigned long do_thermal_cap(int cpu, unsigned long thermal_max_freq)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg;
+	struct rq *rq = cpu_rq(cpu);
+	int nr_cap_states;
+
+	if (!max_cap[cpu]) {
+		rcu_read_lock();
+		sd = rcu_dereference(per_cpu(sd_ea, cpu));
+		if (!sd || !sd->groups || !sd->groups->sge ||
+		    !sd->groups->sge->cap_states) {
+			rcu_read_unlock();
+			return rq->cpu_capacity_orig;
+		}
+		sg = sd->groups;
+		nr_cap_states = sg->sge->nr_cap_states;
+		max_cap[cpu] = sg->sge->cap_states[nr_cap_states - 1].cap;
+		rcu_read_unlock();
+	}
+
+	if (cpu_max_table_freq[cpu] &&
+	    unlikely(thermal_max_freq && thermal_max_freq
+		!= cpu_max_table_freq[cpu])) {
+		return div64_ul(thermal_max_freq * max_cap[cpu],
+				cpu_max_table_freq[cpu]);
+	} else {
+		return rq->cpu_capacity_orig;
+	}
+}
+
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+	struct cpumask cpumask;
+	struct sched_cluster *cluster;
+	int i, update_capacity = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
+	cpumask_copy(&cpumask, cpus);
+
+	for_each_cpu(i, &cpumask)
+		thermal_cap_cpu[i] = do_thermal_cap(i, fmax);
+
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+		update_capacity += (cluster->max_mitigated_freq != fmax);
+		cluster->max_mitigated_freq = fmax;
+	}
+	spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
+
+	if (update_capacity)
+		update_cpu_cluster_capacity(cpus);
+}
+
+/*
+ * A task's cpu usage is accounted in:
+ *	rq->curr/prev_runnable_sum, when its ->grp is NULL
+ *	grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer the task's cpu usage between those counters when it transitions
+ * between groups.
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+				struct task_struct *p, int event)
+{
+	u64 wallclock;
+	struct group_cpu_time *cpu_time;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	int migrate_type;
+	int cpu = cpu_of(rq);
+	bool new_task;
+	int i;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+	new_task = is_new_task(p);
+
+	cpu_time = &rq->grp_time;
+	if (event == ADD_TASK) {
+		migrate_type = RQ_TO_GROUP;
+
+		src_curr_runnable_sum = &rq->curr_runnable_sum;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &rq->prev_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		*src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
+		*src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
+		if (new_task) {
+			*src_nt_curr_runnable_sum -=
+					p->ravg.curr_window_cpu[cpu];
+			*src_nt_prev_runnable_sum -=
+					p->ravg.prev_window_cpu[cpu];
+		}
+
+		update_cluster_load_subtractions(p, cpu,
+				rq->window_start, new_task);
+
+	} else {
+		migrate_type = GROUP_TO_RQ;
+
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_curr_runnable_sum = &rq->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+
+		*src_curr_runnable_sum -= p->ravg.curr_window;
+		*src_prev_runnable_sum -= p->ravg.prev_window;
+		if (new_task) {
+			*src_nt_curr_runnable_sum -= p->ravg.curr_window;
+			*src_nt_prev_runnable_sum -= p->ravg.prev_window;
+		}
+
+		/*
+		 * Need to reset curr/prev windows for all CPUs, not just the
+		 * ones in the same cluster. Since inter-cluster migrations
+		 * did not result in the appropriate bookkeeping, the values
+		 * per CPU would be inaccurate.
+		 */
+		for_each_possible_cpu(i) {
+			p->ravg.curr_window_cpu[i] = 0;
+			p->ravg.prev_window_cpu[i] = 0;
+		}
+	}
+
+	*dst_curr_runnable_sum += p->ravg.curr_window;
+	*dst_prev_runnable_sum += p->ravg.prev_window;
+	if (new_task) {
+		*dst_nt_curr_runnable_sum += p->ravg.curr_window;
+		*dst_nt_prev_runnable_sum += p->ravg.prev_window;
+	}
+
+	/*
+	 * When a task enters or exits a group, its curr and prev windows are
+	 * moved to a single CPU. This behavior might be sub-optimal in the
+	 * exit case; however, it saves us the overhead of handling inter-cluster
+	 * migration fixups while the task is part of a related group.
+	 */
+	p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
+	p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
+
+	trace_sched_migration_update_sum(p, migrate_type, rq);
+
+	BUG_ON((s64)*src_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_prev_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+}
+
+/*
+ * Runs in hard-irq context. This should ideally run just after the latest
+ * window roll-over.
+ */
+void walt_irq_work(struct irq_work *irq_work)
+{
+	struct sched_cluster *cluster;
+	struct rq *rq;
+	int cpu;
+	u64 wc;
+
+	for_each_cpu(cpu, cpu_possible_mask)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+
+	wc = sched_ktime_clock();
+
+	for_each_sched_cluster(cluster) {
+		raw_spin_lock(&cluster->load_lock);
+
+		for_each_cpu(cpu, &cluster->cpus) {
+			rq = cpu_rq(cpu);
+			if (rq->curr) {
+				update_task_ravg(rq->curr, rq,
+						TASK_UPDATE, wc, 0);
+				account_load_subtractions(rq);
+			}
+
+			cpufreq_update_util(rq, 0);
+		}
+
+		raw_spin_unlock(&cluster->load_lock);
+	}
+
+	for_each_cpu(cpu, cpu_possible_mask)
+		raw_spin_unlock(&cpu_rq(cpu)->lock);
+
+	core_ctl_check(this_rq()->window_start);
 }
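
As a rough illustration of the hysteresis that group_will_fit() applies above, the sketch below mirrors only its threshold selection. The 20 ms window and the 100%/95% thresholds are the defaults introduced in this hunk (sched_group_upmigrate/downmigrate and their _pct sysctls); the scale_load_to_cpu() step is omitted and the helper names in the sketch are made up for the example.

#include <stdio.h>

#define RAVG_WINDOW_NS		20000000ULL	/* MIN_SCHED_RAVG_WINDOW, 20 ms */
#define UPMIGRATE_PCT		100		/* sysctl_sched_group_upmigrate_pct */
#define DOWNMIGRATE_PCT		95		/* sysctl_sched_group_downmigrate_pct */

static unsigned long long pct_to_real(unsigned int pct)
{
	/* max_task_load() == sched_ravg_window in the patch above */
	return (RAVG_WINDOW_NS * pct) / 100;
}

/*
 * Returns 1 when the group still fits on the smaller cluster, 0 when it
 * should move to (or stay on) the bigger one. "on_big" stands in for the
 * grp->preferred_cluster capacity comparison in group_will_fit().
 */
static int fits_on_small_cluster(unsigned long long demand_ns, int on_big)
{
	unsigned long long threshold;

	threshold = on_big ? pct_to_real(DOWNMIGRATE_PCT)
			   : pct_to_real(UPMIGRATE_PCT);

	return demand_ns < threshold;
}

int main(void)
{
	/* 19.5 ms of aggregate demand sits inside the hysteresis band. */
	printf("fits while small: %d\n", fits_on_small_cluster(19500000ULL, 0));
	printf("fits while big:   %d\n", fits_on_small_cluster(19500000ULL, 1));
	return 0;
}

Keeping the down-migrate threshold below the up-migrate one prevents a group whose demand hovers around the boundary from ping-ponging between clusters.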
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index e181c87..f153332 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,47 +16,344 @@
 
 #ifdef CONFIG_SCHED_WALT
 
-void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
-		u64 wallclock, u64 irqtime);
-void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
-void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
-void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p);
-void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p);
-void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
-void walt_init_new_task_load(struct task_struct *p);
-void walt_mark_task_starting(struct task_struct *p);
-void walt_set_window_start(struct rq *rq);
-void walt_migrate_sync_cpu(int cpu);
-void walt_init_cpu_efficiency(void);
-u64 walt_ktime_clock(void);
-void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
+#include <linux/sched/sysctl.h>
+
+#define WINDOW_STATS_RECENT		0
+#define WINDOW_STATS_MAX		1
+#define WINDOW_STATS_MAX_RECENT_AVG	2
+#define WINDOW_STATS_AVG		3
+#define WINDOW_STATS_INVALID_POLICY	4
+
+/* Min window size (in ns) = 20ms */
+#define MIN_SCHED_RAVG_WINDOW 20000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+#define EXITING_TASK_MARKER	0xdeaddead
+
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK	0
+#define FREQ_REPORT_CPU_LOAD			1
+#define FREQ_REPORT_TOP_TASK			2
+
+#define for_each_related_thread_group(grp) \
+	list_for_each_entry(grp, &active_related_thread_groups, list)
+
+#define SCHED_NEW_TASK_WINDOWS 5
+
+extern unsigned int sched_ravg_window;
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_possible_freq;
+extern unsigned int sched_major_task_runtime;
+extern unsigned int __read_mostly sched_init_task_load_windows;
+extern unsigned int __read_mostly sched_load_granule;
+
+extern struct mutex cluster_lock;
+extern rwlock_t related_thread_group_lock;
+extern __read_mostly unsigned int sched_ravg_hist_size;
+extern __read_mostly unsigned int sched_freq_aggregate;
+extern __read_mostly int sched_freq_aggregate_threshold;
+extern __read_mostly unsigned int sched_window_stats_policy;
+extern __read_mostly unsigned int sched_group_upmigrate;
+extern __read_mostly unsigned int sched_group_downmigrate;
+
+extern struct sched_cluster init_cluster;
+
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime);
+
+extern unsigned int nr_eligible_big_tasks(int cpu);
+
+#ifndef CONFIG_SCHED_HMP
+static inline void
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (p->misfit)
+		stats->nr_big_tasks++;
+}
+
+static inline void
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (p->misfit)
+		stats->nr_big_tasks--;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+#endif
+
+static inline void
+adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
+{
+	struct rq *rq = container_of(stats, struct rq, hmp_stats);
+
+	if (sched_disable_window_stats)
+		return;
+
+	sched_update_nr_prod(cpu_of(rq), 0, true);
+	stats->nr_big_tasks += inc ? delta : -delta;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				 struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg += task_load;
+	stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum -= p->ravg.pred_demand;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			      struct task_struct *p, s64 task_load_delta,
+			      s64 pred_demand_delta)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	stats->cumulative_runnable_avg += task_load_delta;
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum += pred_demand_delta;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+extern void inc_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
                                   u64 wallclock);
 
-u64 walt_irqload(int cpu);
-int walt_cpu_high_irqload(int cpu);
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	s64 delta;
+
+	delta = get_jiffies_64() - rq->irqload_ts;
+	/*
+	 * The current context can be preempted by an irq, and rq->irqload_ts
+	 * can be updated by the irq context, so delta can be negative.
+	 * But this is okay and we can safely return, as this means there
+	 * was a recent irq occurrence.
+	 */
+
+	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+		return rq->avg_irqload;
+	else
+		return 0;
+}
+
+static inline int sched_cpu_high_irqload(int cpu)
+{
+	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline int exiting_task(struct task_struct *p)
+{
+	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+}
+
+extern u64 sched_ktime_clock(void);
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+	return cpu_rq(cpu)->cluster;
+}
+
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
+
+static inline bool is_new_task(struct task_struct *p)
+{
+	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
+}
+
+static inline void clear_top_tasks_table(u8 *table)
+{
+	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
+}
+
+extern void update_cluster_load_subtractions(struct task_struct *p,
+					int cpu, u64 ws, bool new_task);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+
+static inline unsigned int max_task_load(void)
+{
+	return sched_ravg_window;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+{
+	return div64_u64(cycles, period);
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline void
+move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
+{
+	struct list_head *first, *last;
+
+	first = src->next;
+	last = src->prev;
+
+	if (sync_rcu) {
+		INIT_LIST_HEAD_RCU(src);
+		synchronize_rcu();
+	}
+
+	first->prev = dst;
+	dst->prev = last;
+	last->next = dst;
+
+	/* Ensure list sanity before making the head visible to all CPUs. */
+	smp_mb();
+	dst->next = first;
+}
+
+extern void reset_task_stats(struct task_struct *p);
+extern void update_cluster_topology(void);
+
+extern struct list_head cluster_head;
+#define for_each_sched_cluster(cluster) \
+	list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+extern void init_clusters(void);
+
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
+
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock);
+
+static inline void assign_cluster_ids(struct list_head *head)
+{
+	struct sched_cluster *cluster;
+	int pos = 0;
+
+	list_for_each_entry(cluster, head, list) {
+		cluster->id = pos;
+		sched_cluster[pos++] = cluster;
+	}
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+void sort_clusters(void);
+
+void walt_irq_work(struct irq_work *irq_work);
 
 #else /* CONFIG_SCHED_WALT */
 
-static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
-		int event, u64 wallclock, u64 irqtime) { }
-static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
-static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
-static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p) { }
-static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p) { }
-static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static inline void walt_init_new_task_load(struct task_struct *p) { }
-static inline void walt_mark_task_starting(struct task_struct *p) { }
-static inline void walt_set_window_start(struct rq *rq) { }
-static inline void walt_migrate_sync_cpu(int cpu) { }
-static inline void walt_init_cpu_efficiency(void) { }
-static inline u64 walt_ktime_clock(void) { return 0; }
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+				int event, u64 wallclock, u64 irqtime) { }
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
+static inline void adjust_nr_big_tasks(struct hmp_sched_stats *stats,
+		int delta, bool inc)
+{
+}
+
+static inline void inc_nr_big_task(struct hmp_sched_stats *stats,
+		struct task_struct *p)
+{
+}
+
+static inline void dec_nr_big_task(struct hmp_sched_stats *stats,
+		struct task_struct *p)
+{
+}
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
+static inline void update_cluster_topology(void) { }
+static inline void init_clusters(void) {}
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
 
 #endif /* CONFIG_SCHED_WALT */
 
-extern unsigned int walt_disabled;
-
 #endif
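
The scale_load_to_freq() helper added above is a plain cross-frequency rescale: load * src_freq / dst_freq. A minimal standalone sketch of that arithmetic follows; the 600 MHz/1.8 GHz frequencies and the 8 ms sample are made-up values, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as scale_load_to_freq() in walt.h above. */
static uint64_t scale_load_to_freq(uint64_t load, unsigned int src_freq,
				   unsigned int dst_freq)
{
	return (load * (uint64_t)src_freq) / dst_freq;
}

int main(void)
{
	/*
	 * 8 ms of execution observed at 600 MHz (600000 kHz) is roughly
	 * equivalent to 8 * 600 / 1800 = ~2.67 ms at 1.8 GHz.
	 */
	uint64_t ns = scale_load_to_freq(8000000ULL, 600000, 1800000);

	printf("%llu ns\n", (unsigned long long)ns);
	return 0;
}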
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f55a02b..534431a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -292,6 +292,15 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_SCHED_WALT
+	{
+		.procname       = "sched_cpu_high_irqload",
+		.data           = &sysctl_sched_cpu_high_irqload,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+#endif
 #ifdef CONFIG_SCHED_HMP
 	{
 		.procname	= "sched_freq_reporting_policy",
@@ -319,13 +328,6 @@
 		.extra1		= &zero,
 	},
 	{
-		.procname       = "sched_cpu_high_irqload",
-		.data           = &sysctl_sched_cpu_high_irqload,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = proc_dointvec,
-	},
-	{
 		.procname       = "sched_ravg_hist_size",
 		.data           = &sysctl_sched_ravg_hist_size,
 		.maxlen         = sizeof(unsigned int),
@@ -480,15 +482,6 @@
 		.extra2		= &one_thousand,
 	},
 	{
-		.procname	= "sched_boost",
-		.data		= &sysctl_sched_boost,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_boost_handler,
-		.extra1         = &zero,
-		.extra2		= &three,
-	},
-	{
 		.procname	= "sched_short_burst_ns",
 		.data		= &sysctl_sched_short_burst,
 		.maxlen		= sizeof(unsigned int),
@@ -545,18 +538,13 @@
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.procname	= "sched_walt_init_task_load_pct",
-		.data		= &sysctl_sched_walt_init_task_load_pct,
+		.procname	= "sched_boost",
+		.data		= &sysctl_sched_boost,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sched_walt_cpu_high_irqload",
-		.data		= &sysctl_sched_walt_cpu_high_irqload,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= sched_boost_handler,
+		.extra1         = &zero,
+		.extra2		= &three,
 	},
 #endif
 	{
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 0e9505f..1258b16 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -559,7 +559,8 @@
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
 	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_double_free(cache, object, shadow_byte);
+		kasan_report_double_free(cache, object,
+				__builtin_return_address(1));
 		return true;
 	}
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6..7572917 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -104,7 +104,7 @@
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,
-			s8 shadow);
+					void *ip);
 
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 073325a..35d2db8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -48,7 +48,13 @@
 	return first_bad_addr;
 }
 
-static void print_error_description(struct kasan_access_info *info)
+static bool addr_has_shadow(struct kasan_access_info *info)
+{
+	return (info->access_addr >=
+		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+}
+
+static const char *get_shadow_bug_type(struct kasan_access_info *info)
 {
 	const char *bug_type = "unknown-crash";
 	u8 *shadow_addr;
@@ -95,12 +101,39 @@
 		break;
 	}
 
-	pr_err("BUG: KASAN: %s in %pS at addr %p\n",
-		bug_type, (void *)info->ip,
-		info->access_addr);
-	pr_err("%s of size %zu by task %s/%d\n",
-		info->is_write ? "Write" : "Read",
-		info->access_size, current->comm, task_pid_nr(current));
+	return bug_type;
+}
+
+const char *get_wild_bug_type(struct kasan_access_info *info)
+{
+	const char *bug_type = "unknown-crash";
+
+	if ((unsigned long)info->access_addr < PAGE_SIZE)
+		bug_type = "null-ptr-deref";
+	else if ((unsigned long)info->access_addr < TASK_SIZE)
+		bug_type = "user-memory-access";
+	else
+		bug_type = "wild-memory-access";
+
+	return bug_type;
+}
+
+static const char *get_bug_type(struct kasan_access_info *info)
+{
+	if (addr_has_shadow(info))
+		return get_shadow_bug_type(info);
+	return get_wild_bug_type(info);
+}
+
+static void print_error_description(struct kasan_access_info *info)
+{
+	const char *bug_type = get_bug_type(info);
+
+	pr_err("BUG: KASAN: %s in %pS\n",
+		bug_type, (void *)info->ip);
+	pr_err("%s of size %zu at addr %p by task %s/%d\n",
+		info->is_write ? "Write" : "Read", info->access_size,
+		info->access_addr, current->comm, task_pid_nr(current));
 }
 
 static inline bool kernel_or_module_addr(const void *addr)
@@ -139,9 +172,9 @@
 	kasan_enable_current();
 }
 
-static void print_track(struct kasan_track *track)
+static void print_track(struct kasan_track *track, const char *prefix)
 {
-	pr_err("PID = %u\n", track->pid);
+	pr_err("%s by task %u:\n", prefix, track->pid);
 	if (track->stack) {
 		struct stack_trace trace;
 
@@ -152,59 +185,84 @@
 	}
 }
 
-static void kasan_object_err(struct kmem_cache *cache, void *object)
+static struct page *addr_to_page(const void *addr)
+{
+	if ((addr >= (void *)PAGE_OFFSET) &&
+			(addr < high_memory))
+		return virt_to_head_page(addr);
+	return NULL;
+}
+
+static void describe_object_addr(struct kmem_cache *cache, void *object,
+				const void *addr)
+{
+	unsigned long access_addr = (unsigned long)addr;
+	unsigned long object_addr = (unsigned long)object;
+	const char *rel_type;
+	int rel_bytes;
+
+	pr_err("The buggy address belongs to the object at %p\n"
+	       " which belongs to the cache %s of size %d\n",
+		object, cache->name, cache->object_size);
+
+	if (!addr)
+		return;
+
+	if (access_addr < object_addr) {
+		rel_type = "to the left";
+		rel_bytes = object_addr - access_addr;
+	} else if (access_addr >= object_addr + cache->object_size) {
+		rel_type = "to the right";
+		rel_bytes = access_addr - (object_addr + cache->object_size);
+	} else {
+		rel_type = "inside";
+		rel_bytes = access_addr - object_addr;
+	}
+
+	pr_err("The buggy address is located %d bytes %s of\n"
+	       " %d-byte region [%p, %p)\n",
+		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
+		(void *)(object_addr + cache->object_size));
+}
+
+static void describe_object(struct kmem_cache *cache, void *object,
+				const void *addr)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 
+	if (cache->flags & SLAB_KASAN) {
+		print_track(&alloc_info->alloc_track, "Allocated");
+		pr_err("\n");
+		print_track(&alloc_info->free_track, "Freed");
+		pr_err("\n");
+	}
+
+	describe_object_addr(cache, object, addr);
+}
+
+static void print_address_description(void *addr)
+{
+	struct page *page = addr_to_page(addr);
+
 	dump_stack();
-	pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
-		cache->object_size);
+	pr_err("\n");
 
-	if (!(cache->flags & SLAB_KASAN))
-		return;
+	if (page && PageSlab(page)) {
+		struct kmem_cache *cache = page->slab_cache;
+		void *object = nearest_obj(cache, page,	addr);
 
-	pr_err("Allocated:\n");
-	print_track(&alloc_info->alloc_track);
-	pr_err("Freed:\n");
-	print_track(&alloc_info->free_track);
-}
+		describe_object(cache, object, addr);
+	}
 
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
-			s8 shadow)
-{
-	unsigned long flags;
+	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
+		pr_err("The buggy address belongs to the variable:\n");
+		pr_err(" %pS\n", addr);
+	}
 
-	kasan_start_report(&flags);
-	pr_err("BUG: Double free or freeing an invalid pointer\n");
-	pr_err("Unexpected shadow byte: 0x%hhX\n", shadow);
-	kasan_object_err(cache, object);
-	kasan_end_report(&flags);
-}
-
-static void print_address_description(struct kasan_access_info *info)
-{
-	const void *addr = info->access_addr;
-
-	if ((addr >= (void *)PAGE_OFFSET) &&
-		(addr < high_memory)) {
-		struct page *page = virt_to_head_page(addr);
-
-		if (PageSlab(page)) {
-			void *object;
-			struct kmem_cache *cache = page->slab_cache;
-			object = nearest_obj(cache, page,
-						(void *)info->access_addr);
-			kasan_object_err(cache, object);
-			return;
-		}
+	if (page) {
+		pr_err("The buggy address belongs to the page:\n");
 		dump_page(page, "kasan: bad access detected");
 	}
-
-	if (kernel_or_module_addr(addr)) {
-		if (!init_task_stack_addr(addr))
-			pr_err("Address belongs to variable %pS\n", addr);
-	}
-	dump_stack();
 }
 
 static bool row_is_guilty(const void *row, const void *guilty)
@@ -259,31 +317,34 @@
 	}
 }
 
+void kasan_report_double_free(struct kmem_cache *cache, void *object,
+				void *ip)
+{
+	unsigned long flags;
+
+	kasan_start_report(&flags);
+	pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip);
+	pr_err("\n");
+	print_address_description(object);
+	pr_err("\n");
+	print_shadow_for_address(object);
+	kasan_end_report(&flags);
+}
+
 static void kasan_report_error(struct kasan_access_info *info)
 {
 	unsigned long flags;
-	const char *bug_type;
 
 	kasan_start_report(&flags);
 
-	if (info->access_addr <
-			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
-		if ((unsigned long)info->access_addr < PAGE_SIZE)
-			bug_type = "null-ptr-deref";
-		else if ((unsigned long)info->access_addr < TASK_SIZE)
-			bug_type = "user-memory-access";
-		else
-			bug_type = "wild-memory-access";
-		pr_err("BUG: KASAN: %s on address %p\n",
-			bug_type, info->access_addr);
-		pr_err("%s of size %zu by task %s/%d\n",
-			info->is_write ? "Write" : "Read",
-			info->access_size, current->comm,
-			task_pid_nr(current));
+	print_error_description(info);
+	pr_err("\n");
+
+	if (!addr_has_shadow(info)) {
 		dump_stack();
 	} else {
-		print_error_description(info);
-		print_address_description(info);
+		print_address_description((void *)info->access_addr);
+		pr_err("\n");
 		print_shadow_for_address(info->first_bad_addr);
 	}
 
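
The left/inside/right wording that describe_object_addr() prints above reduces to simple offset arithmetic against the object bounds. A standalone sketch with made-up values (a 64-byte object at 0x1000 is an assumption for the example) walks through the three cases:

#include <stdio.h>

static void classify(unsigned long access, unsigned long object,
		     unsigned int size)
{
	const char *rel_type;
	unsigned long rel_bytes;

	if (access < object) {
		rel_type = "to the left";
		rel_bytes = object - access;
	} else if (access >= object + size) {
		rel_type = "to the right";
		rel_bytes = access - (object + size);
	} else {
		rel_type = "inside";
		rel_bytes = access - object;
	}

	printf("%#lx is %lu bytes %s of the %u-byte region [%#lx, %#lx)\n",
	       access, rel_bytes, rel_type, size, object, object + size);
}

int main(void)
{
	classify(0x0ff8, 0x1000, 64);	/* 8 bytes to the left  */
	classify(0x1010, 0x1000, 64);	/* 16 bytes inside      */
	classify(0x1044, 0x1000, 64);	/* 4 bytes to the right */
	return 0;
}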
diff --git a/net/9p/client.c b/net/9p/client.c
index 3fc94a4..cf129fe 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2101,6 +2101,10 @@
 		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
+	if (rsize < count) {
+		pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+		count = rsize;
+	}
 
 	p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9901e5b..f45f619 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -859,7 +859,8 @@
 	if (skb)
 		skb = skb_clone(skb, GFP_ATOMIC);
 	write_unlock(&neigh->lock);
-	neigh->ops->solicit(neigh, skb);
+	if (neigh->ops->solicit)
+		neigh->ops->solicit(neigh, skb);
 	atomic_inc(&neigh->probes);
 	kfree_skb(skb);
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 53599bd..457f882 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
 		struct netdev_queue *txq;
+		unsigned int q_index;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			kfree_skb(skb);
 			continue;
 		}
 
-		txq = skb_get_tx_queue(dev, skb);
-
 		local_irq_save(flags);
+		/* check if skb->queue_mapping is still valid */
+		q_index = skb_get_queue_mapping(skb);
+		if (unlikely(q_index >= dev->real_num_tx_queues)) {
+			q_index = q_index % dev->real_num_tx_queues;
+			skb_set_queue_mapping(skb, q_index);
+		}
+		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8de6707..ba1146c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3082,22 +3082,32 @@
 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
 		if (!(features & NETIF_F_GSO_PARTIAL)) {
 			struct sk_buff *iter;
+			unsigned int frag_len;
 
 			if (!list_skb ||
 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
 				goto normal;
 
-			/* Split the buffer at the frag_list pointer.
-			 * This is based on the assumption that all
-			 * buffers in the chain excluding the last
-			 * containing the same amount of data.
+			/* If we get here then all the required
+			 * GSO features except frag_list are supported.
+			 * Try to split the SKB into multiple GSO SKBs
+			 * with no frag_list.
+			 * Currently we can do that only when the buffers don't
+			 * have a linear part and all the buffers except
+			 * the last are of the same length.
 			 */
+			frag_len = list_skb->len;
 			skb_walk_frags(head_skb, iter) {
+				if (frag_len != iter->len && iter->next)
+					goto normal;
 				if (skb_headlen(iter))
 					goto normal;
 
 				len -= iter->len;
 			}
+
+			if (len != frag_len)
+				goto normal;
 		}
 
 		/* GSO partial only requires that we trim off any excess that
@@ -3785,6 +3795,7 @@
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
 	serr->ee.ee_info = tstype;
+	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
 		if (sk->sk_protocol == IPPROTO_TCP &&
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9826695..4d37bdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -474,16 +474,15 @@
 		return false;
 
 	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
-	 * timestamp with egress dev. Not possible for packets without dev
+	 * timestamp with egress dev. Not possible for packets without iif
 	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
 	 */
-	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
-	    (!skb->dev))
+	info = PKTINFO_SKB_CB(skb);
+	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+	    !info->ipi_ifindex)
 		return false;
 
-	info = PKTINFO_SKB_CB(skb);
 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
-	info->ipi_ifindex = skb->dev->ifindex;
 	return true;
 }
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 06879e6..93bfadf 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@
 void ping_unhash(struct sock *sk)
 {
 	struct inet_sock *isk = inet_sk(sk);
+
 	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+	write_lock_bh(&ping_table.lock);
 	if (sk_hashed(sk)) {
-		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
 		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(&ping_table.lock);
 	}
+	write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e6acef5..70c40ba2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2581,7 +2581,7 @@
 	skb_reset_network_header(skb);
 
 	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-	ip_hdr(skb)->protocol = IPPROTO_ICMP;
+	ip_hdr(skb)->protocol = IPPROTO_UDP;
 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6a90a0e..eb142ca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2297,6 +2297,7 @@
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	tcp_saved_syn_free(tp);
 
 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index f9038d6b..baea5df 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -167,12 +167,8 @@
 	}
 out:
 	rcu_read_unlock();
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	/* Clear out private data before diag gets it and
-	 * the ca has not been initialized.
-	 */
-	if (ca->get_info)
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 	if (ca->flags & TCP_CONG_NEEDS_ECN)
 		INET_ECN_xmit(sk);
 	else
@@ -199,11 +195,10 @@
 	tcp_cleanup_congestion_control(sk);
 	icsk->icsk_ca_ops = ca;
 	icsk->icsk_ca_setsockopt = 1;
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	if (sk->sk_state != TCP_CLOSE) {
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+	if (sk->sk_state != TCP_CLOSE)
 		tcp_init_congestion_control(sk);
-	}
 }
 
 /* Manage refcounts on socket close. */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7588fa9..553138d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3293,14 +3293,24 @@
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	if (!ifp->rt) {
-		struct rt6_info *rt;
+	/* rt6i_ref == 0 means the host route was removed from the
+	 * FIB, for example, if 'lo' device is taken down. In that
+	 * case regenerate the host route.
+	 */
+	if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+		struct rt6_info *rt, *prev;
 
 		rt = addrconf_dst_alloc(idev, &ifp->addr, false);
 		if (unlikely(IS_ERR(rt)))
 			return PTR_ERR(rt);
 
+		/* ifp->rt can be accessed outside of rtnl */
+		spin_lock(&ifp->lock);
+		prev = ifp->rt;
 		ifp->rt = rt;
+		spin_unlock(&ifp->lock);
+
+		ip6_rt_put(prev);
 	}
 
 	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
@@ -3642,14 +3652,19 @@
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
 		struct rt6_info *rt = NULL;
+		bool keep;
 
 		addrconf_del_dad_work(ifa);
 
+		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+			!addr_is_local(&ifa->addr);
+		if (!keep)
+			list_move(&ifa->if_list, &del_list);
+
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-		    !addr_is_local(&ifa->addr)) {
+		if (keep) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
@@ -3661,8 +3676,6 @@
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
-
-			list_move(&ifa->if_list, &del_list);
 		}
 
 		spin_unlock_bh(&ifa->lock);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1529833..a381772 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -401,9 +401,6 @@
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
 				      struct sock_exterr_skb *serr)
@@ -415,14 +412,9 @@
 	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
 		return false;
 
-	if (!skb->dev)
+	if (!IP6CB(skb)->iif)
 		return false;
 
-	if (skb->protocol == htons(ETH_P_IPV6))
-		IP6CB(skb)->iif = skb->dev->ifindex;
-	else
-		PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
 	return true;
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 885b411..97e89a2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1037,7 +1037,7 @@
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
 	struct net_device_stats *stats = &t->dev->stats;
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
@@ -1057,26 +1057,28 @@
 
 	/* NBMA tunnel */
 	if (ipv6_addr_any(&t->parms.raddr)) {
-		struct in6_addr *addr6;
-		struct neighbour *neigh;
-		int addr_type;
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			struct in6_addr *addr6;
+			struct neighbour *neigh;
+			int addr_type;
 
-		if (!skb_dst(skb))
-			goto tx_err_link_failure;
+			if (!skb_dst(skb))
+				goto tx_err_link_failure;
 
-		neigh = dst_neigh_lookup(skb_dst(skb),
-					 &ipv6_hdr(skb)->daddr);
-		if (!neigh)
-			goto tx_err_link_failure;
+			neigh = dst_neigh_lookup(skb_dst(skb),
+						 &ipv6_hdr(skb)->daddr);
+			if (!neigh)
+				goto tx_err_link_failure;
 
-		addr6 = (struct in6_addr *)&neigh->primary_key;
-		addr_type = ipv6_addr_type(addr6);
+			addr6 = (struct in6_addr *)&neigh->primary_key;
+			addr_type = ipv6_addr_type(addr6);
 
-		if (addr_type == IPV6_ADDR_ANY)
-			addr6 = &ipv6_hdr(skb)->daddr;
+			if (addr_type == IPV6_ADDR_ANY)
+				addr6 = &ipv6_hdr(skb)->daddr;
 
-		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-		neigh_release(neigh);
+			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+			neigh_release(neigh);
+		}
 	} else if (!(t->parms.flags &
 		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
 		/* enable the cache only only if the routing decision does
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7f4265b..117405d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+		       struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
@@ -820,7 +821,7 @@
 					     dev->ifindex, &in6_dev->cnf);
 	}
 
-	if (v->flags & MIFF_REGISTER)
+	if ((v->flags & MIFF_REGISTER) && !notify)
 		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
@@ -1331,7 +1332,6 @@
 	struct mr6_table *mrt;
 	struct mif_device *v;
 	int ct;
-	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@
 		v = &mrt->vif6_table[0];
 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 			if (v->dev == dev)
-				mif6_delete(mrt, ct, &list);
+				mif6_delete(mrt, ct, 1, NULL);
 		}
 	}
-	unregister_netdevice_many(&list);
 
 	return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@
 	for (i = 0; i < mrt->maxvif; i++) {
 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
 			continue;
-		mif6_delete(mrt, i, &list);
+		mif6_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1706,7 +1705,7 @@
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(mrt, mifi, NULL);
+		ret = mif6_delete(mrt, mifi, 0, NULL);
 		rtnl_unlock();
 		return ret;
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5665375..1a34da0 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1172,8 +1172,7 @@
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb)
-			amount = skb_tail_pointer(skb) -
-				skb_transport_header(skb);
+			amount = skb->len;
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 73527d8..7d17670 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1825,6 +1825,10 @@
 	int addr_type;
 	int err = -EINVAL;
 
+	/* RTF_PCPU is an internal flag; can not be set by userspace */
+	if (cfg->fc_flags & RTF_PCPU)
+		goto out;
+
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
 		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a646f34..fecad10 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1685,7 +1685,7 @@
 		struct kcm_attach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_attach_ioctl(sock, &info);
 
@@ -1695,7 +1695,7 @@
 		struct kcm_unattach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_unattach_ioctl(sock, &info);
 
@@ -1706,7 +1706,7 @@
 		struct socket *newsock = NULL;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_clone(sock, &info, &newsock);
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a2ed3bd..e702cb95 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,8 @@
 }
 EXPORT_SYMBOL_GPL(l2tp_session_find);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref)
 {
 	int hash;
 	struct l2tp_session *session;
@@ -288,6 +289,9 @@
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
 			}
@@ -298,7 +302,7 @@
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 181e755c..e7233ba 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -243,7 +243,8 @@
 struct l2tp_session *l2tp_session_find(struct net *net,
 				       struct l2tp_tunnel *tunnel,
 				       u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref);
 struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a..d100aed 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -238,10 +238,14 @@
 	}
 
 	/* Show the tunnel or session context */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		l2tp_dfs_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ff750bb..2066953 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -178,9 +178,10 @@
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7095786..26cf4dc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -191,9 +191,10 @@
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bf31177..9f66272 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -844,7 +844,7 @@
 				goto out;
 		}
 
-		session = l2tp_session_find_nth(tunnel, si);
+		session = l2tp_session_get_nth(tunnel, si, false);
 		if (session == NULL) {
 			ti++;
 			tunnel = NULL;
@@ -854,8 +854,11 @@
 
 		if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
-					 session, L2TP_CMD_SESSION_GET) < 0)
+					 session, L2TP_CMD_SESSION_GET) < 0) {
+			l2tp_session_dec_refcount(session);
 			break;
+		}
+		l2tp_session_dec_refcount(session);
 
 		si++;
 	}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 41d47bf..1387f54 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@
 static void pppol2tp_session_destruct(struct sock *sk)
 {
 	struct l2tp_session *session = sk->sk_user_data;
+
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_queue_purge(&sk->sk_write_queue);
+
 	if (session) {
 		sk->sk_user_data = NULL;
 		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@
 		l2tp_session_queue_purge(session);
 		sock_put(sk);
 	}
-	skb_queue_purge(&sk->sk_receive_queue);
-	skb_queue_purge(&sk->sk_write_queue);
-
 	release_sock(sk);
 
 	/* This will delete the session context via
@@ -1554,7 +1555,7 @@
 
 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -1681,10 +1682,14 @@
 
 	/* Show the tunnel or session context.
 	 */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		pppol2tp_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		pppol2tp_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
@@ -1843,4 +1848,4 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 0f5628a..3c7ae04 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -969,9 +969,8 @@
 		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 			IF_DEBUG("qtaguid: iface_stat: create(%s): "
 				 "ifa=%p ifa_label=%s\n",
-				 ifname, ifa,
-				 ifa->ifa_label ? ifa->ifa_label : "(null)");
-			if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+				 ifname, ifa, ifa->ifa_label);
+			if (!strcmp(ifname, ifa->ifa_label))
 				break;
 		}
 	}
@@ -1209,10 +1208,6 @@
 		pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
 				   par->hooknum, __func__);
 		BUG();
-	} else if (unlikely(!el_dev->name)) {
-		pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
-				   par->hooknum, __func__);
-		BUG();
 	} else {
 		proto = ipx_proto(skb, par);
 		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
@@ -1637,8 +1632,6 @@
 
 	if (unlikely(!el_dev)) {
 		pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
-	} else if (unlikely(!el_dev->name)) {
-		pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
 	} else {
 		int proto = ipx_proto(skb, par);
 		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ab0974..cb76ff3 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3702,6 +3702,8 @@
 			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
+		if (val > INT_MAX)
+			return -EINVAL;
 		po->tp_reserve = val;
 		return 0;
 	}
@@ -4247,6 +4249,8 @@
 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
 		if (unlikely(rb->frames_per_block == 0))
 			goto out;
+		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+			goto out;
 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
 					req->tp_frame_nr))
 			goto out;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6734420..14346dc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6861,6 +6861,9 @@
 	if (sock->state != SS_UNCONNECTED)
 		goto out;
 
+	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+		goto out;
+
 	/* If backlog is zero, disable listening. */
 	if (!backlog) {
 		if (sctp_sstate(sk, CLOSED))
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 449e4a3..a2dff71 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -58,6 +58,8 @@
 	(5490 - 5590 @ 80), (36)
 	(5650 - 5730 @ 80), (36)
 	(5735 - 5835 @ 80), (36)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40), NO-OUTDOOR
 
 country AS: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -81,7 +83,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country AU: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -90,6 +92,8 @@
 	(5490 - 5590 @ 80), (24), DFS
 	(5650 - 5730 @ 80), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (43), NO-OUTDOOR
 
 country AW: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -107,8 +111,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country BB: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -135,7 +137,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country BF: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -159,7 +161,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country BH:
 	(2402 - 2482 @ 40), (20)
@@ -189,6 +191,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5250 - 5330 @ 80), (30), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country BR: DFS-FCC
 	(2402 - 2482 @ 40), (30)
@@ -196,6 +200,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
 
 country BS: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -229,6 +235,8 @@
 	(5490 - 5590 @ 80), (24), DFS
 	(5650 - 5730 @ 80), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
 
 country CF: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -251,6 +259,8 @@
 	(5890 - 5910 @ 10), (30)
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country CI: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -263,17 +273,16 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (20)
 	(5735 - 5835 @ 80), (20)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (50), NO-OUTDOOR
 
 country CN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5735 - 5835 @ 80), (33)
-	# 60 gHz band channels 1,4: 28dBm, channels 2,3: 44dBm
-	# ref: http://www.miit.gov.cn/n11293472/n11505629/n11506593/n11960250/n11960606/n11960700/n12330791.files/n12330790.pdf
-	(57240 - 59400 @ 2160), (28)
+	# 60 gHz band channels 2,3: 44dBm
 	(59400 - 63720 @ 2160), (44)
-	(63720 - 65880 @ 2160), (28)
 
 country CO: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -288,6 +297,8 @@
 	(5250 - 5330 @ 20), (24), DFS
 	(5490 - 5730 @ 20), (24), DFS
 	(5735 - 5835 @ 20), (30)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (30)
 
 country CX: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -311,7 +322,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 # Data from http://www.ctu.eu/164/download/VOR/VOR-12-08-2005-34.pdf
 # and http://www.ctu.eu/164/download/VOR/VOR-12-05-2007-6-AN.pdf
@@ -330,7 +341,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 # Data from "Frequenznutzungsplan" (as published in April 2008), downloaded from
 # http://www.bundesnetzagentur.de/cae/servlet/contentblob/38448/publicationFile/2659/Frequenznutzungsplan2008_Id17448pdf.pdf
@@ -353,7 +364,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country DK: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -370,7 +381,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country DM: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -396,6 +407,8 @@
 	(5250 - 5330 @ 20), (24), DFS
 	(5490 - 5730 @ 20), (24), DFS
 	(5735 - 5835 @ 20), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country EE: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -412,7 +425,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country EG: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -434,7 +447,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country ET: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -457,7 +470,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country FM: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -481,7 +494,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country GB: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -498,7 +511,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country GD: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -511,8 +524,6 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (18), AUTO-BW
 	(5250 - 5330 @ 80), (18), DFS, AUTO-BW
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country GF: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -554,7 +565,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country GT: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -568,6 +579,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country GY:
 	(2402 - 2482 @ 40), (30)
@@ -579,12 +592,16 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: FCC/EU
+	(57240 - 65880 @ 2160), (40)
 
 country HN:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (24)
 	(5490 - 5730 @ 160), (24)
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country HR: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -601,7 +618,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country HT: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -625,7 +642,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country ID:
 	# ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
@@ -637,7 +654,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	# 5.9ghz band
 	# reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
 	(5850 - 5870 @ 10), (30)
@@ -647,13 +663,15 @@
 	(5890 - 5910 @ 10), (30)
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
-        # 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country IL: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	# 60 gHz band channels 1-4, based on Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country IN:
 	(2402 - 2482 @ 40), (20)
@@ -679,7 +697,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country IT: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -696,7 +714,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country JM: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -704,11 +722,15 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country JO:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23)
 	(5735 - 5835 @ 80), (23)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country JP: DFS-JP
 	(2402 - 2482 @ 40), (20)
@@ -716,9 +738,8 @@
 	(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
 	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
 	(5490 - 5710 @ 160), (20), DFS
-	# 60 GHz band channels 2-4 at 10mW,
-	# ref: http://www.arib.or.jp/english/html/overview/doc/1-STD-T74v1_1.pdf
-	(59000 - 66000 @ 2160), (10 mW)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country KE: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -747,7 +768,7 @@
 	(5735 - 5835 @ 80), (30)
 	# 60 GHz band channels 1-4,
 	# ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
-	(57000 - 66000 @ 2160), (43)
+	(57240 - 65880 @ 2160), (43)
 
 country KP: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -799,6 +820,8 @@
 	(5890 - 5910 @ 10), (30)
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country LK: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -828,7 +851,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country LU: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -845,7 +868,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country LV: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -862,12 +885,14 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country MA: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country MC: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -905,8 +930,6 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country MN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -956,7 +979,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country MU: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -983,6 +1006,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country MY: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -990,6 +1015,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5650 @ 160), (24), DFS
 	(5735 - 5815 @ 80), (24)
+	# 60 gHz band channels 1-3
+	(57240 - 63720 @ 2160), (40)
 
 country NA: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1009,6 +1036,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country NL: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1025,7 +1054,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country NO: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1042,7 +1071,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country NP:
 	(2402 - 2482 @ 40), (20)
@@ -1055,6 +1084,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country OM: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1094,6 +1125,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40)
 
 country PK:
 	(2402 - 2482 @ 40), (30)
@@ -1114,7 +1147,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country PM: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1151,7 +1184,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country PW: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -1166,6 +1199,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country QA:
 	(2402 - 2482 @ 40), (20)
@@ -1192,7 +1227,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 # Source:
 # http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf
@@ -1202,13 +1237,15 @@
         (5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country RU:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (23)
 	(5490 - 5730 @ 160), (30)
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country RW: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1238,7 +1275,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country SG: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1246,6 +1283,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4, ref: Etsi En 302 567
+	(57000 - 66000 @ 2160), (40), NO-OUTDOOR
 
 country SI: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1262,7 +1301,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country SK: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1279,7 +1318,7 @@
 	(5900 - 5920 @ 10), (30)
 	(5910 - 5930 @ 10), (30)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57000 - 66000 @ 2160), (40)
 
 country SN:
 	(2402 - 2482 @ 40), (20)
@@ -1327,6 +1366,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country TN: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1338,14 +1379,14 @@
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
-	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country TT:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (27)
 	(5490 - 5730 @ 160), (36)
 	(5735 - 5835 @ 80), (36)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country TW: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -1353,6 +1394,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-3, FCC
+	(57240 - 63720 @ 2160), (40)
 
 country TZ:
 	(2402 - 2482 @ 40), (20)
@@ -1372,7 +1415,7 @@
 	(5490 - 5670 @ 160), (20), DFS
 	(5735 - 5835 @ 80), (20)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
-	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
+	(57240 - 65880 @ 2160), (20)
 
 country UG: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1401,14 +1444,16 @@
 	(5910 - 5930 @ 10), (30)
 	# 60g band
 	# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
-	# channels 1,2,3, EIRP=40dBm(43dBm peak)
-	(57240 - 63720 @ 2160), (40)
+	# channels 1,2,3,4,5,6 EIRP=40dBm(43dBm peak)
+	(57240 - 70200 @ 2160), (40)
 
 country UY: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
         (5250 - 5330 @ 80), (23), DFS, AUTO-BW
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country UZ: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -1440,6 +1485,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40)
 
 country VU: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -1466,7 +1513,6 @@
 	(5170 - 5250 @ 80), (20), NO-IR, AUTO-BW, NO-OUTDOOR
 	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
 	(5490 - 5710 @ 160), (20), DFS
-	(59000 - 66000 @ 2160), (10 mW)
 
 country YE:
 	(2402 - 2482 @ 40), (20)
@@ -1483,6 +1529,8 @@
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
+	# 60 gHz band channels 1-4
+	(57240 - 65880 @ 2160), (40), NO-OUTDOOR
 
 country ZW: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index e318878..35ad69f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -70,7 +70,7 @@
 MODULE_PARM_DESC(bss_entries_limit,
                  "limit to number of scan BSS entries (per wiphy, default 1000)");
 
-#define IEEE80211_SCAN_RESULT_EXPIRE	(7 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
 {
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 7675d11..68637c9 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -64,6 +64,11 @@
 include scripts/Makefile.host
 endif
 
+# Do not include dtbo rules unless needed
+ifneq ($(dtbo-y),)
+include scripts/Makefile.dtbo
+endif
+
 ifneq ($(KBUILD_SRC),)
 # Create output directory if not already present
 _dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
diff --git a/scripts/Makefile.dtbo b/scripts/Makefile.dtbo
new file mode 100644
index 0000000..db4a0f4
--- /dev/null
+++ b/scripts/Makefile.dtbo
@@ -0,0 +1,24 @@
+__dtbo := $(sort $(dtbo-y))
+
+dtbo-base	:= $(sort $(foreach m,$(__dtbo),$($(m)-base)))
+dtbo := $(foreach m,$(__dtbo),$(if $($(m)-base),$(m)))
+
+__dtbo     := $(addprefix $(obj)/,$(__dtbo))
+dtbo-base	:= $(addprefix $(obj)/,$(dtbo-base))
+dtbo	:= $(addprefix $(obj)/,$(dtbo))
+
+ifneq ($(DTC_OVERLAY_TEST_EXT),)
+DTC_OVERLAY_TEST = $(DTC_OVERLAY_TEST_EXT)
+quiet_cmd_dtbo_verify	= VERIFY  $@
+cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).dtb
+else
+cmd_dtbo_verify = true
+endif
+
+$(obj)/%.dtbo: $(src)/%.dts FORCE
+	$(call if_changed_dep,dtc)
+	$(call if_changed,dtbo_verify)
+
+$(call multi_depend, $(dtbo), , -base)
+
+always += $(dtbo)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index d3d3320..f156681 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -301,6 +301,12 @@
 $(obj)/%.dtb.S: $(obj)/%.dtb
 	$(call cmd,dt_S_dtb)
 
+ifneq ($(DTC_EXT),)
+DTC = $(DTC_EXT)
+else
+DTC = $(objtree)/scripts/dtc/dtc
+endif
+
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
 	$(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index f93db0e..a0d45ef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2438,15 +2438,6 @@
 						     $herecurr);
 					}
 					$shorttext = AFTER_SHORTTEXT;
-				} elsif (length($line) > (SHORTTEXT_LIMIT +
-							  $shorttext_exspc)
-					 && $line !~ /^:([0-7]{6}\s){2}
-						      ([[:xdigit:]]+\.*
-						       \s){2}\w+\s\w+/xms) {
-					WARN("LONG_COMMIT_TEXT",
-					     "commit text line over " .
-					     SHORTTEXT_LIMIT .
-					     " characters\n" . $herecurr);
 				} elsif ($line=~/^\s*change-id:/i ||
 					 $line=~/^\s*signed-off-by:/i ||
 					 $line=~/^\s*crs-fixed:/i ||
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index 2a48022..2eb4aec 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -1,7 +1,9 @@
 # scripts/dtc makefile
 
 hostprogs-y	:= dtc
+ifeq ($(DTC_EXT),)
 always		:= $(hostprogs-y)
+endif
 
 dtc-objs	:= dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
 		   srcpos.o checks.o util.o
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 3b693e9..12ba833 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -28,19 +28,16 @@
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
-	int max_count = 5 * HZ;
+	int warn_count = 5 * HZ;
 
 	if (atomic_read(lockp) < 0) {
 		pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
 		return;
 	}
 	while (atomic_read(lockp) > 0) {
-		if (max_count == 0) {
-			pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
-			break;
-		}
+		if (warn_count-- == 0)
+			pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
 		schedule_timeout_uninterruptible(1);
-		max_count--;
 	}
 }
 
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index f676931..c3768cd 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -45,7 +45,7 @@
 
 	struct snd_rawmidi_substream *substream;
 	snd_fw_async_midi_port_fill fill;
-	unsigned int consume_bytes;
+	int consume_bytes;
 };
 
 int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index e629b88..474b06d 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -226,11 +226,11 @@
 	if (err < 0)
 		goto error;
 
-	err = detect_quirks(oxfw);
+	err = snd_oxfw_stream_discover(oxfw);
 	if (err < 0)
 		goto error;
 
-	err = snd_oxfw_stream_discover(oxfw);
+	err = detect_quirks(oxfw);
 	if (err < 0)
 		goto error;
 
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 9c6f471..17224de 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -945,7 +945,7 @@
 	tristate
 
 config SND_SOC_WCD_SPI
-	depends on CONFIG_SPI
+	depends on SPI
 	tristate
 
 config SND_SOC_WL1273
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 4b6fcb0b..c0a32f3 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -8516,6 +8516,7 @@
 	{WCD934X_HPH_L_TEST, 0x01, 0x01},
 	{WCD934X_HPH_R_TEST, 0x01, 0x01},
 	{WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20},
+	{WCD934X_MBHC_NEW_CTL_2, 0x0C, 0x00},
 };
 
 static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 4c8ff29..d5873ee 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -621,7 +621,7 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.platform_name = "sst-mfld-platform",
-		.ignore_suspend = 1,
+		.nonatomic = true,
 		.dynamic = 1,
 		.dpcm_playback = 1,
 		.dpcm_capture = 1,
@@ -634,7 +634,6 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.platform_name = "sst-mfld-platform",
-		.ignore_suspend = 1,
 		.nonatomic = true,
 		.dynamic = 1,
 		.dpcm_playback = 1,
@@ -661,6 +660,7 @@
 						| SND_SOC_DAIFMT_CBS_CFS,
 		.be_hw_params_fixup = byt_rt5640_codec_fixup,
 		.ignore_suspend = 1,
+		.nonatomic = true,
 		.dpcm_playback = 1,
 		.dpcm_capture = 1,
 		.init = byt_rt5640_init,
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 35f591e..eabff3a 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -235,7 +235,6 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.platform_name = "sst-mfld-platform",
-		.ignore_suspend = 1,
 		.nonatomic = true,
 		.dynamic = 1,
 		.dpcm_playback = 1,
@@ -249,7 +248,6 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.platform_name = "sst-mfld-platform",
-		.ignore_suspend = 1,
 		.nonatomic = true,
 		.dynamic = 1,
 		.dpcm_playback = 1,
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 2b8c9c7..304bf47 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -155,6 +155,21 @@
 	u32 index;
 };
 
+enum pinctrl_pin_state {
+	STATE_DISABLE = 0, /* All pins are in sleep state */
+	STATE_MI2S_ACTIVE,  /* I2S = active, TDM = sleep */
+	STATE_TDM_ACTIVE,  /* I2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *mi2s_disable;
+	struct pinctrl_state *tdm_disable;
+	struct pinctrl_state *mi2s_active;
+	struct pinctrl_state *tdm_active;
+	enum pinctrl_pin_state curr_state;
+};
+
 struct msm_asoc_mach_data {
 	u32 mclk_freq;
 	int us_euro_gpio; /* used by gpio driver API */
@@ -162,6 +177,7 @@
 	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
 	struct snd_info_entry *codec_root;
+	struct msm_pinctrl_info pinctrl_info;
 };
 
 struct msm_asoc_wcd93xx_codec {
@@ -170,6 +186,9 @@
 	void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
 };
 
+static const char *const pin_states[] = {"sleep", "i2s-active",
+					 "tdm-active"};
+
 enum {
 	TDM_0 = 0,
 	TDM_1,
@@ -509,6 +528,7 @@
 	.key_code[7] = 0,
 	.linein_th = 5000,
 	.moisture_en = true,
+	.mbhc_micbias = MIC_BIAS_2,
 	.anc_micbias = MIC_BIAS_2,
 	.enable_anc_mic_detect = false,
 };
@@ -3802,6 +3822,275 @@
 	return ret;
 }
 
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+				enum pinctrl_pin_state new_state)
+{
+	int ret = 0;
+	int curr_state = 0;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+	curr_state = pinctrl_info->curr_state;
+	pinctrl_info->curr_state = new_state;
+	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+		 pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+	if (curr_state == pinctrl_info->curr_state) {
+		pr_debug("%s: Already in same state\n", __func__);
+		goto err;
+	}
+
+	if (curr_state != STATE_DISABLE &&
+		pinctrl_info->curr_state != STATE_DISABLE) {
+		pr_debug("%s: state already active cannot switch\n", __func__);
+		ret = -EIO;
+		goto err;
+	}
+
+	switch (pinctrl_info->curr_state) {
+	case STATE_MI2S_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_active);
+		if (ret) {
+			pr_err("%s: MI2S state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_TDM_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_active);
+		if (ret) {
+			pr_err("%s: TDM state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_DISABLE:
+		if (curr_state == STATE_MI2S_ACTIVE) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+		} else {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_disable);
+		}
+		if (ret) {
+			pr_err("%s:  state disable failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	default:
+		pr_err("%s: TLMM pin state is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+err:
+	return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info->pinctrl) {
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		pinctrl_info->pinctrl = NULL;
+	}
+}
+
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = NULL;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	pinctrl_info->pinctrl = pinctrl;
+
+	/* get all the states handles from Device Tree */
+	pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-sleep");
+	if (IS_ERR(pinctrl_info->mi2s_disable)) {
+		pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-active");
+	if (IS_ERR(pinctrl_info->mi2s_active)) {
+		pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-sleep");
+	if (IS_ERR(pinctrl_info->tdm_disable)) {
+		pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-active");
+	if (IS_ERR(pinctrl_info->tdm_active)) {
+		pr_err("%s: could not get tdm_active pinstate\n",
+			__func__);
+		goto err;
+	}
+	/* Reset the TLMM pins to a default state */
+	ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+	if (ret != 0) {
+		pr_err("%s: Disable TLMM pins failed with %d\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+	pinctrl_info->curr_state = STATE_DISABLE;
+
+	return 0;
+
+err:
+	devm_pinctrl_put(pinctrl);
+	pinctrl_info->pinctrl = NULL;
+	return -EINVAL;
+}
+
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+	} else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+	} else {
+		pr_err("%s: dai id 0x%x not supported\n",
+			__func__, cpu_dai->id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+		__func__, cpu_dai->id, channels->max, rate->max,
+		params_format(params));
+
+	return 0;
+}
+
+static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+				     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	int channels, slot_width, slots;
+	unsigned int slot_mask;
+	unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+	pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+	slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+	/* 2 slot config - bits 0 and 1 set for the first two slots */
+	slot_mask = 0x0000FFFF >> (16-slots);
+	slot_width = 32;
+	channels = slots;
+
+	pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("%s: slot_width %d\n", __func__, slot_width);
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+			slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+			0, NULL, channels, slot_offset);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	} else {
+		pr_err("%s: invalid use case, err:%d\n",
+			__func__, ret);
+	}
+
+end:
+	return ret;
+}
+
+static int sdm845_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static void sdm845_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+	if (ret)
+		pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+}
+
+static struct snd_soc_ops sdm845_tdm_be_ops = {
+	.hw_params = sdm845_tdm_snd_hw_params,
+	.startup = sdm845_tdm_snd_startup,
+	.shutdown = sdm845_tdm_snd_shutdown
+};
+
 static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
 {
 	int ret = 0;
@@ -3809,6 +4098,9 @@
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int index = cpu_dai->id;
 	unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -3822,6 +4114,14 @@
 			__func__, cpu_dai->id);
 		goto err;
 	}
+	if (index == QUAT_MI2S) {
+		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret) {
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+			goto err;
+		}
+	}
 	/*
 	 * Muxtex protection in case the same MI2S
 	 * interface using for both TX and RX  so
@@ -3874,6 +4174,9 @@
 	int ret;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	int index = rtd->cpu_dai->id;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -3892,6 +4195,13 @@
 		}
 	}
 	mutex_unlock(&mi2s_intf_conf[index].lock);
+
+	if (index == QUAT_MI2S) {
+		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret)
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+	}
 }
 
 static struct snd_soc_ops msm_mi2s_be_ops = {
@@ -4929,8 +5239,8 @@
 		.no_pcm = 1,
 		.dpcm_playback = 1,
 		.id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
-		.be_hw_params_fixup = msm_be_hw_params_fixup,
-		.ops = &msm_tdm_be_ops,
+		.be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+		.ops = &sdm845_tdm_be_ops,
 		.ignore_suspend = 1,
 	},
 	{
@@ -6296,6 +6606,17 @@
 		dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
 			ret);
 
+	/* Parse pinctrl info from devicetree */
+	ret = msm_get_pinctrl(pdev);
+	if (!ret) {
+		pr_debug("%s: pinctrl parsing successful\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: Parsing pinctrl failed with %d. Cannot use Ports\n",
+			__func__, ret);
+		ret = 0;
+	}
+
 	msm_i2s_auxpcm_init(pdev);
 
 	is_initial_boot = true;
@@ -6307,6 +6628,7 @@
 
 	return 0;
 err:
+	msm_release_pinctrl(pdev);
 	devm_kfree(&pdev->dev, pdata);
 	return ret;
 }
@@ -6323,6 +6645,7 @@
 	}
 	msm_i2s_auxpcm_deinit();
 
+	msm_release_pinctrl(pdev);
 	snd_soc_unregister_card(card);
 	return 0;
 }