Merge "ARM: dts: msm: Support speedbin 1 frequencies on SDM845" into msm-4.9
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 06b219a..3c8a79a 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -28,6 +28,7 @@
   - qcom,use-sw-aes-ccm-algo : optional, indicates if use SW aes-ccm algorithm.
   - qcom,clk-mgmt-sus-res : optional, indicate if the ce clocks need to be disabled/enabled in suspend/resume function.
   - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,request-bw-before-clk : optional, indicates if the HW supports bandwidth requests prior to clock controls.
   - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target.Default value is 1 if not specified.
 
   - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index ffba081..b043a93 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -193,6 +193,8 @@
 					"dsi_cmd_mode" = enable command mode.
 - qcom,5v-boost-gpio:			Specifies the panel gpio for display 5v boost.
 - qcom,mdss-dsi-te-check-enable:	Boolean to enable Tear Check configuration.
+- qcom,mdss-dsi-te-using-wd:		Boolean to enable watchdog timer support for generating the vsync signal
+					for command mode panels. By default, the panel TE is used to generate the vsync.
 - qcom,mdss-dsi-te-using-te-pin:	Boolean to specify whether using hardware vsync.
 - qcom,mdss-dsi-te-pin-select:		Specifies TE operating mode.
 					0 = TE through embedded dcs command
@@ -568,6 +570,7 @@
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
new file mode 100644
index 0000000..d62910a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -0,0 +1,147 @@
+* Qualcomm Technologies, Inc. MSM Camera CDM
+
+CDM (Camera Data Mover) is a module intended to provide a means for fast
+programming of camera registers and lookup tables.
+
+=======================
+Required Node Structure
+=======================
+The CDM interface node takes care of handling the CDM HW nodes and provides
+an interface for camera clients.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cam-cdm-intf".
+
+- num-hw-cdm
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported HW blocks.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM interface.
+
+Example:
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "vfe",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+	};
+
+=======================
+CDM HW Node Structure
+=======================
+The CDM HW node provides an interface for camera clients through the
+CDM interface node.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam170-cpas-cdm0".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas-cdm".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CDM HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CDM HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CDM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CDM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM HW node.
+
+Example:
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm0";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"titan_top_ahb_clk",
+			"cam_axi_clk",
+			"camcc_slow_ahb_clk_src",
+			"cpas_top_ahb_clk",
+			"camnoc_axi_clk";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
+		cdm-client-names = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
new file mode 100644
index 0000000..a61bab3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -0,0 +1,282 @@
+* Qualcomm Technologies, Inc. MSM Camera CPAS
+
+The MSM camera CPAS device provides the dependency definitions
+for enabling the camera CPAS HW and the client definitions for
+all HW blocks that use the CPAS driver for BW voting. These
+definitions consist of various properties that define the list
+of supported clients and the AHB and AXI master-slave IDs used
+for BW voting.
+
+=======================
+Required Node Structure
+=======================
+The camera CPAS device must be described in four levels of device nodes. The
+first level describes the overall CPAS device. Within it, second level nodes
+describe the list of AXI ports that map different clients for AXI BW voting.
+Third level nodes describe the details of each AXI port: its name and its
+mnoc and camnoc AXI bus information. Fourth level nodes describe the bus
+master-slave IDs and ab/ib values for the mnoc and camnoc bus interfaces.
+
+==================================
+First Level Node - CAM CPAS device
+==================================
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cpas".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas_top" or "camss_top".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CAMNOC HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CPAS HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CPAS HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CPAS HW.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- client-id-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether CPAS clients are ID based.
+
+- client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CPAS.
+
+- client-axi-port-names
+  Usage: required
+  Value type: <string>
+  Definition: AXI Port name for each client.
+
+- client-bus-camnoc-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether Clients are connected
+              through CAMNOC for AXI access.
+
+===================================================================
+Third Level Node - CAM AXI Port properties
+===================================================================
+- qcom,axi-port-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the AXI Port.
+
+===================================================================
+Fourth Level Node - CAM AXI Bus properties
+===================================================================
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- qcom,msm-bus-vector-dyn-vote
+  Usage: optional
+  Value type: <empty>
+  Definition: Bool property specifying whether this bus client
+              is dynamic vote based.
+
+Example:
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
new file mode 100644
index 0000000..f9a5e0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
@@ -0,0 +1,111 @@
+* Qualcomm Technologies, Inc. MSM Camera IFE CSID
+
+The camera IFE CSID device provides the definitions for enabling
+the IFE CSID hardware. It also provides the functions for the client
+to control the IFE CSID hardware.
+
+=======================
+Required Node Structure
+=======================
+The IFE CSID device is described in one level of the device node.
+
+======================================
+First Level Node - CAM IFE CSID device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,csid170" or "qcom,csid-lite170".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should be "csid".
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: required
+  Value type: <u32>
+  Definition: Interrupt associated with IFE CSID HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for IFE CSID HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+                "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for IFE CSID HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for IFE CSID HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+
+
+Example:
+
+	qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid170";
+		reg = <0xacb3000 0x1000>;
+		reg-names = "csid";
+		interrupts = <0 464 0>;
+		interrupt-names = "csid";
+		vdd-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>;
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 384000000>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
new file mode 100644
index 0000000..13aae64
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
@@ -0,0 +1,31 @@
+* Qualcomm Technologies, Inc. MSM Camera ISP
+
+The MSM camera ISP driver provides the definitions for enabling
+the camera ISP hardware. It provides the functions for the client to
+control the ISP hardware.
+
+=======================
+Required Node Structure
+=======================
+The camera ISP device is described in one level of device node.
+
+==================================
+First Level Node - CAM ISP device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-isp".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "vfe" or "ife".
+
+Example:
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 8efa85d..0c6a9f2 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -213,6 +213,18 @@
 		    target quotient adjustment due to an ACD up recommendation.
 		    Valid values are 0 through 3.
 
+- qcom,cpr-acd-notwait-for-cl-settled
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates ACD down recommendations do not
+		    need to wait for CPR closed-loop to settle.
+
+- qcom,cpr-acd-avg-fast-update
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates CPR should issue immediate
+		    voltage updates following ACD requests.
+
 - qcom,cpr-acd-avg-enable
 	Usage:      optional
 	Value type: <empty>
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 5601276..e305163 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -135,6 +135,10 @@
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_PRIO=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
 CONFIG_RFKILL=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 241aa71..1f08294 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -58,6 +58,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-panel-hdr-enabled;
 		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 509547f..36f36fb 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -55,6 +55,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
 			05 01 00 00 0a 00 01 00
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 6569219..122299c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -27,7 +27,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
 };
 
 &dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 2e893de..55e615c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -27,7 +27,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
 };
 
 &dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 3b9c26f..9ead234 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -329,4 +329,173 @@
 			};
 		};
 	};
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"csiphy0", "csiphy1", "csiphy2", "cci0",
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
+
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		cell-index = <0>;
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "ife",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+		status = "ok";
+	};
+
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		reg-cam-base = <0x48000>;
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_camera_ahb",
+			"gcc_camera_axi",
+			"cam_cc_soc_ahb_clk",
+			"cam_cc_cpas_ahb_clk",
+			"cam_cc_camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0>;
+		cdm-client-names = "vfe";
+		status = "ok";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index af28003..2c9c012 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -229,6 +229,30 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index d316d63..e4261e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -94,6 +94,30 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 4a8d06d..1ac661d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -118,3 +118,32 @@
 
 	status = "ok";
 };
+
+&labibb {
+	status = "ok";
+	qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index ac3352e..74bb133 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -291,6 +291,78 @@
 		ibb-supply = <&ibb_regulator>;
 	};
 
+	dsi_sim_vid_display: qcom,dsi-display@8 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_vid>;
+	};
+
+	dsi_dual_sim_vid_display: qcom,dsi-display@9 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_vid>;
+	};
+
+	dsi_sim_cmd_display: qcom,dsi-display@10 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_cmd>;
+	};
+
+	dsi_dual_sim_cmd_display: qcom,dsi-display@11 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_cmd>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -299,7 +371,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_dual_nt35597_truly_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_video_display>;
 };
 
 &dsi_dual_nt35597_truly_video {
@@ -366,3 +438,27 @@
 				<1 0 2>;
 	qcom,default-topology-index = <0>;
 };
+
+&dsi_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 9c497fa..df2e0c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -37,8 +37,8 @@
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
-		iommus = <&apps_smmu 0x880 0x0>, <&apps_smmu 0x888 0x0>,
-			<&apps_smmu 0xc80 0x0>, <&apps_smmu 0xc88 0x0>;
+		iommus = <&apps_smmu 0x880 0x8>,
+			<&apps_smmu 0xc80 0x8>;
 
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -121,6 +121,7 @@
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
 		qcom,sde-has-dim-layer;
+		qcom,sde-has-idle-pc;
 		qcom,sde-max-bw-low-kbps = <9600000>;
 		qcom,sde-max-bw-high-kbps = <9600000>;
 		qcom,sde-dram-channels = <2>;
@@ -177,7 +178,6 @@
 	};
 
 	sde_rscc: qcom,sde_rscc@af20000 {
-		status = "disabled";
 		cell-index = <0>;
 		compatible = "qcom,sde-rsc";
 		reg = <0xaf20000 0x1c44>,
@@ -186,13 +186,16 @@
 		qcom,sde-rsc-version = <1>;
 
 		vdd-supply = <&mdss_core_gdsc>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
-		clock-names = "iface_clk", "vsync_clk";
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>;
+		clock-names = "vsync_clk", "iface_clk";
 		clock-rate = <0 0>;
 
 		qcom,sde-dram-channels = <2>;
 
+		mboxes = <&disp_rsc 0>;
+		mbox-names = "disp_rsc";
+
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
 			qcom,msm-bus,name = "disp_rsc";
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index dccfcf4..052e3d8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -880,6 +880,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -899,6 +900,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -1012,6 +1014,11 @@
 		#reset-cells = <1>;
 	};
 
+	cpucc_debug: syscon@17970018 {
+		compatible = "syscon";
+		reg = <0x17970018 0x4>;
+	};
+
 	clock_cpucc: qcom,cpucc@0x17d41000 {
 		compatible = "qcom,clk-cpu-osm";
 		reg = <0x17d41000 0x1400>,
@@ -1215,6 +1222,7 @@
 		qcom,camcc = <&clock_camcc>;
 		qcom,dispcc = <&clock_dispcc>;
 		qcom,gpucc = <&clock_gpucc>;
+		qcom,cpucc = <&cpucc_debug>;
 		clock-names = "xo_clk_src";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		#clock-cells = <1>;
@@ -1262,7 +1270,7 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
@@ -1288,13 +1296,12 @@
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk",
 			"rx_lane1_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_PHY_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
-			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1383,7 +1390,7 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
@@ -1407,13 +1414,12 @@
 			"ref_clk",
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_CARD_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_CLK>,
+			<&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
-			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
@@ -1779,66 +1785,54 @@
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1401 0x0>,
-				 <&apps_smmu 0x1421 0x0>;
+			iommus = <&apps_smmu 0x1401 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb2 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1402 0x0>,
-				 <&apps_smmu 0x1422 0x0>;
+			iommus = <&apps_smmu 0x1402 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb3 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1403 0x0>,
-				 <&apps_smmu 0x1423 0x0>;
+			iommus = <&apps_smmu 0x1403 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb4 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1404 0x0>,
-				 <&apps_smmu 0x1424 0x0>;
+			iommus = <&apps_smmu 0x1404 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb5 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1405 0x0>,
-				 <&apps_smmu 0x1425 0x0>;
+			iommus = <&apps_smmu 0x1405 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb6 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1406 0x0>,
-				 <&apps_smmu 0x1426 0x0>;
+			iommus = <&apps_smmu 0x1406 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb7 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1407 0x0>,
-				 <&apps_smmu 0x1427 0x0>;
+			iommus = <&apps_smmu 0x1407 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb8 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1408 0x0>,
-				 <&apps_smmu 0x1428 0x0>;
+			iommus = <&apps_smmu 0x1408 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb9 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x1409 0x0>,
-				 <&apps_smmu 0x1419 0x0>,
-				 <&apps_smmu 0x1429 0x0>;
+			iommus = <&apps_smmu 0x1409 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb10 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x140A 0x0>,
-				 <&apps_smmu 0x141A 0x0>,
-				 <&apps_smmu 0x142A 0x0>;
+			iommus = <&apps_smmu 0x140A 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb11 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
@@ -3728,14 +3722,14 @@
 &clock_cpucc {
 	lmh_dcvs0: qcom,limits-dcvs@0 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <0>;
 		#thermal-sensor-cells = <0>;
 	};
 
 	lmh_dcvs1: qcom,limits-dcvs@1 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <1>;
 		#thermal-sensor-cells = <0>;
 	};
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index f18ae62..4256d9b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1178,11 +1178,11 @@
 	ret = fw_get_filesystem_firmware(device, fw->priv);
 	if (ret) {
 		if (!(opt_flags & FW_OPT_NO_WARN))
-			dev_warn(device,
-				 "Direct firmware load for %s failed with error %d\n",
+			dev_dbg(device,
+				 "Firmware %s was not found in kernel paths. rc:%d\n",
 				 name, ret);
 		if (opt_flags & FW_OPT_USERHELPER) {
-			dev_warn(device, "Falling back to user helper\n");
+			dev_dbg(device, "Falling back to user helper\n");
 			ret = fw_load_from_user_helper(fw, name, device,
 						       opt_flags, timeout);
 		}
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index e23a713..5fb870b 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -40,6 +40,7 @@
 #include "clk-regmap.h"
 #include "clk-rcg.h"
 #include "clk-voter.h"
+#include "clk-debug.h"
 
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
@@ -372,6 +373,7 @@
 	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_ops clk_ops_core;
@@ -507,6 +509,7 @@
 	.list_rate = clk_osm_list_rate,
 	.recalc_rate = l3_clk_recalc_rate,
 	.set_rate = l3_clk_set_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_init_data osm_clks_init[] = {
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d30675c..44c5b81 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -272,6 +272,9 @@
 	"video_cc_venus_ahb_clk",
 	"video_cc_venus_ctl_axi_clk",
 	"video_cc_venus_ctl_core_clk",
+	"l3_clk",
+	"pwrcl_clk",
+	"perfcl_clk",
 };
 
 static struct clk_debug_mux gcc_debug_mux = {
@@ -766,6 +769,12 @@
 			0x4, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
 		{ "video_cc_venus_ctl_core_clk", 0x48, 4, VIDEO_CC,
 			0x1, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "l3_clk", 0xD6, 4, CPU,
+			0x46, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "pwrcl_clk", 0xD6, 4, CPU,
+			0x44, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "perfcl_clk", 0xD6, 4, CPU,
+			0x45, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
 	),
 	.hw.init = &(struct clk_init_data){
 		.name = "gcc_debug_mux",
@@ -862,6 +871,16 @@
 		}
 	}
 
+	if (of_get_property(pdev->dev.of_node, "qcom,cpucc", NULL)) {
+		gcc_debug_mux.regmap[CPU] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,cpucc");
+		if (IS_ERR(gcc_debug_mux.regmap[CPU])) {
+			pr_err("Failed to map qcom,cpucc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[CPU]);
+		}
+	}
+
 	clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 0115bb1..ae9d509 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -644,8 +644,6 @@
 		return ret;
 	}
 
-	clk_prepare_enable(gpu_cc_cxo_clk.clkr.hw.clk);
-
 	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
 
 	return ret;
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 7b4ca24..2215dc1 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -124,6 +124,7 @@
 	bool use_sw_hmac_algo;
 	bool use_sw_aes_ccm_algo;
 	bool clk_mgmt_sus_res;
+	bool req_bw_before_clk;
 	unsigned int ce_device;
 	unsigned int ce_hw_instance;
 	unsigned int max_request;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 0cf4386..8af73ac 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -121,6 +121,7 @@
 	bool support_hw_key;
 	bool support_clk_mgmt_sus_res;
 	bool support_only_core_src_clk;
+	bool request_bw_before_clk;
 
 	void __iomem *iobase;	    /* Virtual io base of CE HW  */
 	unsigned int phy_iobase;    /* Physical io base of CE HW    */
@@ -298,7 +299,7 @@
 
 	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
 
-	dev_info(pce_dev->pdev, "CE device = 0x%x\n, IO base, CE = 0x%p\n, Consumer (IN) PIPE %d,    Producer (OUT) PIPE %d\n IO base BAM = 0x%p\n BAM IRQ %d\n Engines Availability = 0x%x\n",
+	dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
 			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
 			pce_dev->ce_bam_info.dest_pipe_index,
 			pce_dev->ce_bam_info.src_pipe_index,
@@ -5675,6 +5676,8 @@
 		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
 	pce_dev->support_only_core_src_clk = of_property_read_bool(
 		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+	pce_dev->request_bw_before_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");
 
 	if (of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,bam-pipe-pair",
@@ -5762,6 +5765,9 @@
 
 	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
 	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		if (pce_dev->request_bw_before_clk)
+			goto skip_set_rate;
+
 		rc = clk_set_rate(pce_dev->ce_core_src_clk,
 						pce_dev->ce_opp_freq_hz);
 		if (rc) {
@@ -5780,6 +5786,7 @@
 		pce_dev->ce_core_src_clk = NULL;
 	}
 
+skip_set_rate:
 	if (pce_dev->support_only_core_src_clk) {
 		pce_dev->ce_core_clk = NULL;
 		pce_dev->ce_clk = NULL;
@@ -6096,6 +6103,7 @@
 	ce_support->hw_key = pce_dev->support_hw_key;
 	ce_support->aes_ccm = true;
 	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
 	if (pce_dev->ce_bam_info.minor_version)
 		ce_support->aligned_only = false;
 	else
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index d70104d..078e198 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -54,9 +54,9 @@
 #define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
 #define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * zone)
 
-enum bwmon_type {
-	BWMON_1,
-	BWMON_2,
+enum mon_reg_type {
+	MON1,
+	MON2,
 };
 
 struct bwmon_spec {
@@ -91,25 +91,25 @@
 
 static DEFINE_SPINLOCK(glb_lock);
 
-static __always_inline void mon_enable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
 		break;
 	}
 }
 
-static __always_inline void mon_disable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(m->throttle_adj, MON2_EN(m));
 		break;
 	}
@@ -124,13 +124,13 @@
 #define MON_CLEAR_BIT	0x1
 #define MON_CLEAR_ALL_BIT	0x2
 static __always_inline
-void mon_clear(struct bwmon *m, bool clear_all, enum bwmon_type type)
+void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		if (clear_all)
 			writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
 		else
@@ -172,19 +172,19 @@
 }
 
 static __always_inline
-void mon_irq_enable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val |= INT_ENABLE_V1;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val |= INT_STATUS_MASK_HWS;
@@ -209,20 +209,20 @@
 }
 
 static __always_inline
-void mon_irq_disable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val &= ~INT_ENABLE_V1;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
 		val &= ~INT_STATUS_MASK_HWS;
@@ -238,18 +238,18 @@
 }
 
 static __always_inline
-unsigned int mon_irq_status(struct bwmon *m, enum bwmon_type type)
+unsigned int mon_irq_status(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 mval;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
 		mval &= INT_STATUS_MASK;
 		break;
-	case BWMON_2:
+	case MON2:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
@@ -283,14 +283,14 @@
 }
 
 static __always_inline
-void mon_irq_clear(struct bwmon *m, enum bwmon_type type)
+void mon_irq_clear(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(INT_STATUS_MASK, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(INT_STATUS_MASK_HWS, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
@@ -419,7 +419,7 @@
 	unsigned long count, status;
 
 	count = readl_relaxed(MON_CNT(m));
-	status = mon_irq_status(m, BWMON_1);
+	status = mon_irq_status(m, MON1);
 
 	dev_dbg(m->dev, "Counter: %08lx\n", count);
 
@@ -469,15 +469,15 @@
 }
 
 static __always_inline
-unsigned long mon_get_count(struct bwmon *m, enum bwmon_type type)
+unsigned long mon_get_count(struct bwmon *m, enum mon_reg_type type)
 {
 	unsigned long count;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		count = mon_get_count1(m);
 		break;
-	case BWMON_2:
+	case MON2:
 		count = mon_get_zone_stats(m);
 		break;
 	}
@@ -499,7 +499,7 @@
 }
 
 static __always_inline
-unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum bwmon_type type)
+unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	unsigned long count;
@@ -515,12 +515,12 @@
 
 static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_1);
+	return __get_bytes_and_clear(hw, MON1);
 }
 
 static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_2);
+	return __get_bytes_and_clear(hw, MON2);
 }
 
 static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
@@ -529,10 +529,10 @@
 	u32 limit;
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_1);
+	mon_disable(m, MON1);
 	count = mon_get_count1(m);
-	mon_clear(m, false, BWMON_1);
-	mon_irq_clear(m, BWMON_1);
+	mon_clear(m, false, MON1);
+	mon_irq_clear(m, MON1);
 
 	if (likely(!m->spec->wrap_on_thres))
 		limit = bytes;
@@ -540,7 +540,7 @@
 		limit = max(bytes, 500000UL);
 
 	mon_set_limit(m, limit);
-	mon_enable(m, BWMON_1);
+	mon_enable(m, MON1);
 
 	return count;
 }
@@ -549,18 +549,18 @@
 {
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_2);
-	mon_clear(m, false, BWMON_2);
-	mon_irq_clear(m, BWMON_2);
+	mon_disable(m, MON2);
+	mon_clear(m, false, MON2);
+	mon_irq_clear(m, MON2);
 
 	mon_set_zones(m, sample_ms);
-	mon_enable(m, BWMON_2);
+	mon_enable(m, MON2);
 
 	return 0;
 }
 
 static irqreturn_t
-__bwmon_intr_handler(int irq, void *dev, enum bwmon_type type)
+__bwmon_intr_handler(int irq, void *dev, enum mon_reg_type type)
 {
 	struct bwmon *m = dev;
 
@@ -576,12 +576,12 @@
 
 static irqreturn_t bwmon_intr_handler(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_1);
+	return __bwmon_intr_handler(irq, dev, MON1);
 }
 
 static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_2);
+	return __bwmon_intr_handler(irq, dev, MON2);
 }
 
 static irqreturn_t bwmon_intr_thread(int irq, void *dev)
@@ -592,8 +592,8 @@
 	return IRQ_HANDLED;
 }
 
-static __always_inline int
-__start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps, enum bwmon_type type)
+static __always_inline int __start_bw_hwmon(struct bw_hwmon *hw,
+		unsigned long mbps, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	u32 limit, zone_actions;
@@ -601,11 +601,11 @@
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
 		break;
-	case BWMON_2:
+	case MON2:
 		zone_actions = calc_zone_actions();
 		handler = bwmon_intr_handler2;
 		break;
@@ -625,11 +625,11 @@
 	mon_clear(m, false, type);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		mon_set_limit(m, limit);
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_set_zones(m, hw->df->profile->polling_ms);
 		/* Set the zone actions to increment appropriate counters */
 		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
@@ -645,16 +645,16 @@
 
 static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_1);
+	return __start_bw_hwmon(hw, mbps, MON1);
 }
 
 static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_2);
+	return __start_bw_hwmon(hw, mbps, MON2);
 }
 
 static __always_inline
-void __stop_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -667,16 +667,16 @@
 
 static void stop_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_1);
+	return __stop_bw_hwmon(hw, MON1);
 }
 
 static void stop_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_2);
+	return __stop_bw_hwmon(hw, MON2);
 }
 
 static __always_inline
-int __suspend_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+int __suspend_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -690,25 +690,25 @@
 
 static int suspend_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_1);
+	return __suspend_bw_hwmon(hw, MON1);
 }
 
 static int suspend_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_2);
+	return __suspend_bw_hwmon(hw, MON2);
 }
 
-static int __resume_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+static int __resume_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	int ret;
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		break;
-	case BWMON_2:
+	case MON2:
 		handler = bwmon_intr_handler2;
 		break;
 	}
@@ -731,12 +731,12 @@
 
 static int resume_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_1);
+	return __resume_bw_hwmon(hw, MON1);
 }
 
 static int resume_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_2);
+	return __resume_bw_hwmon(hw, MON2);
 }
 
 /*************************************************************************/
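The bandwidth-monitor hunks above rename BWMON_1/BWMON_2 to MON1/MON2 and funnel the start/stop/suspend/resume paths through always-inlined helpers that switch on the register-interface type. A minimal sketch of that dispatch pattern follows; the enum, helper, and wrapper names are illustrative stand-ins, not the driver's definitions, and plain static inline stands in for __always_inline.

/*
 * Minimal sketch of the per-monitor-type dispatch pattern used above.
 * Enum, helper, and wrapper names are illustrative, not the driver's.
 */
enum ex_mon_reg_type { EX_MON1, EX_MON2 };

static inline int __ex_start_hwmon(unsigned long mbps,
				   enum ex_mon_reg_type type)
{
	switch (type) {
	case EX_MON1:
		/* limit-style monitor: program a byte-count threshold */
		return mbps ? 0 : -1;
	case EX_MON2:
		/* zone-style monitor: program zone thresholds and actions */
		return 0;
	}
	return -1;
}

/* Thin wrappers pass a constant type so the switch folds away when inlined. */
static inline int ex_start_hwmon1(unsigned long mbps)
{
	return __ex_start_hwmon(mbps, EX_MON1);
}

static inline int ex_start_hwmon2(unsigned long mbps)
{
	return __ex_start_hwmon(mbps, EX_MON2);
}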
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5604bf1..c7260f9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -697,7 +697,7 @@
 	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
 	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
 		rc = PTR_ERR(bus->bus_scale_table);
-		pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+		pr_debug("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
 		bus->bus_scale_table = NULL;
 		return rc;
 	}
@@ -1256,7 +1256,7 @@
 
 	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
 	if (rc)
-		pr_err("failed to init axi bus client, rc = %d\n", rc);
+		pr_debug("failed to init axi bus client, rc = %d\n", rc);
 
 	item->ctrl = dsi_ctrl;
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3402d48..231f29b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1243,7 +1243,7 @@
 	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
 	if (IS_ERR_OR_NULL(mux->byte_clk)) {
 		rc = PTR_ERR(mux->byte_clk);
-		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
 		mux->byte_clk = NULL;
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1258,7 +1258,7 @@
 	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
 		rc = PTR_ERR(mux->pixel_clk);
 		mux->pixel_clk = NULL;
-		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
 		 * non-critical failure since these clocks are requied only for
@@ -1570,7 +1570,7 @@
 			display->lane_map.lane_map_v2[i] = BIT(temp[i]);
 		return 0;
 	} else if (rc != EINVAL) {
-		pr_warn("Incorrect mapping, configure default\n");
+		pr_debug("Incorrect mapping, configure default\n");
 		goto set_default;
 	}
 
@@ -2742,6 +2742,8 @@
 		break;
 	case DSI_OP_CMD_MODE:
 		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+		info->is_te_using_watchdog_timer =
+			display->panel->te_using_watchdog_timer;
 		break;
 	default:
 		pr_err("unknwown dsi panel mode %d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 34aaea2..deb718c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1415,7 +1415,7 @@
 
 	data = of_get_property(of_node, cmd_set_prop_map[type], &length);
 	if (!data) {
-		pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+		pr_debug("%s commands not defined\n", cmd_set_prop_map[type]);
 		rc = -ENOTSUPP;
 		goto error;
 	}
@@ -1480,7 +1480,7 @@
 		} else {
 			rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
 			if (rc)
-				pr_err("[%s] failed to parse set %d\n",
+				pr_debug("[%s] failed to parse set %d\n",
 					panel->name, i);
 		}
 	}
@@ -1556,14 +1556,17 @@
 	return rc;
 }
 
-static int dsi_panel_parse_features(struct dsi_panel *panel,
+static int dsi_panel_parse_misc_features(struct dsi_panel *panel,
 				     struct device_node *of_node)
 {
 	panel->ulps_enabled =
 		of_property_read_bool(of_node, "qcom,ulps-enabled");
 
-	pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+	if (panel->ulps_enabled)
+		pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
 
+	panel->te_using_watchdog_timer = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-te-using-wd");
 	return 0;
 }
 
@@ -1786,8 +1789,8 @@
 					      "qcom,platform-bklight-en-gpio",
 					      0);
 	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
-		pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
-		rc = -EINVAL;
+		pr_debug("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+		rc = 0;
 		goto error;
 	}
 
@@ -2280,7 +2283,7 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
-	rc = dsi_panel_parse_features(panel, of_node);
+	rc = dsi_panel_parse_misc_features(panel, of_node);
 	if (rc)
 		pr_err("failed to parse panel features, rc=%d\n", rc);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 9f63089..de2b5b1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -184,6 +184,7 @@
 	u32 panel_jitter;
 	u32 panel_prefill_lines;
 	bool panel_initialized;
+	bool te_using_watchdog_timer;
 
 	bool dsc_enabled;
 	char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 609c5ff..e2219aa 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -238,7 +238,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
@@ -296,7 +297,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 322b7f2..d50a185 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -389,6 +389,8 @@
  * @max_height:         Max height of display. In case of hot pluggable display
  *                      this is max height supported by controller
  * @is_primary:         Set to true if display is primary display
+ * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
+ *				 used instead of panel TE in cmd mode panels
  * @frame_rate:		Display frame rate
  * @prefill_lines:	prefill lines based on porches.
  * @vtotal:		display vertical total
@@ -412,6 +414,7 @@
 	uint32_t max_height;
 
 	bool is_primary;
+	bool is_te_using_watchdog_timer;
 	uint32_t frame_rate;
 	uint32_t prefill_lines;
 	uint32_t vtotal;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 58222f3..6593b47 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -31,18 +31,20 @@
 		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
 
 static const struct drm_prop_enum_list e_topology_name[] = {
-	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
+	{SDE_RM_TOPOLOGY_NONE,	"sde_none"},
 	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
+	{SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,	"sde_singlepipe_dsc"},
 	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSC,	"sde_dualpipe_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,	"sde_dualpipemerge"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,	"sde_dualpipemerge_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,	"sde_dualpipe_dscmerge"},
 	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
-	{SDE_RM_TOPOLOGY_DUALPIPEMERGE,	"sde_dualpipemerge"}
 };
 static const struct drm_prop_enum_list e_topology_control[] = {
 	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
 	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
 	{SDE_RM_TOPCTL_DSPP,		"dspp"},
-	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
-	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"}
 };
 static const struct drm_prop_enum_list e_power_mode[] = {
 	{SDE_MODE_DPMS_ON,	"ON"},
@@ -656,11 +658,6 @@
 				SDE_ERROR("prep fb failed, %d\n", rc);
 		}
 		break;
-	case CONNECTOR_PROP_TOPOLOGY_CONTROL:
-		rc = sde_rm_check_property_topctl(val);
-		if (rc)
-			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
-		break;
 	case CONNECTOR_PROP_LP:
 		mutex_lock(&c_conn->lock);
 		c_conn->lp_mode = val;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index a136645..39127e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -152,6 +152,8 @@
  * @rc_state:			resource controller state
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
+ * @topology:                   topology of the display
+ * @mode_set_complete:          flag to indicate modeset completion
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -188,6 +190,8 @@
 	struct mutex rc_lock;
 	enum sde_enc_rc_states rc_state;
 	struct delayed_work delayed_off_work;
+	struct msm_display_topology topology;
+	bool mode_set_complete;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -249,15 +253,14 @@
 	memset(hw_res, 0, sizeof(*hw_res));
 	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
 
-	if (_sde_is_dsc_enabled(sde_enc))
-		hw_res->needs_dsc = true;
-
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys && phys->ops.get_hw_resources)
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
+
+	hw_res->topology = sde_enc->topology;
 }
 
 void sde_encoder_destroy(struct drm_encoder *drm_enc)
@@ -423,9 +426,18 @@
 	}
 
 	/* Reserve dynamic resources now. Indicating AtomicTest phase */
-	if (!ret)
-		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+	if (!ret) {
+		/*
+		 * Avoid reserving resources when mode set is pending. Topology
+		 * info may not be available to complete reservation.
+		 */
+		if (drm_atomic_crtc_needs_modeset(crtc_state)
+				&& sde_enc->mode_set_complete) {
+			ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
 				conn_state, true);
+			sde_enc->mode_set_complete = false;
+		}
+	}
 
 	if (!ret)
 		drm_mode_set_crtcinfo(adj_mode, 0);
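The atomic-check change above defers sde_rm_reserve() until a mode set has supplied the topology, using mode_set_complete as a one-shot flag that mode_set arms and atomic check consumes. A reduced sketch of that handshake, with illustrative names:

/*
 * One-shot "topology is known" handshake between mode_set and atomic
 * check, mirroring the mode_set_complete flag above. Illustrative only.
 */
#include <stdbool.h>

struct ex_virt_enc {
	bool mode_set_complete;
};

/* mode_set: derive the topology from the connector, then arm the flag */
static void ex_mode_set(struct ex_virt_enc *enc)
{
	/* ... query and store the topology ... */
	enc->mode_set_complete = true;
}

/* atomic check: reserve only when a completed mode set supplied topology */
static int ex_atomic_check(struct ex_virt_enc *enc, bool needs_modeset)
{
	int ret = 0;

	if (needs_modeset && enc->mode_set_complete) {
		/* ret = reserve hardware resources here */
		enc->mode_set_complete = false;	/* consume the flag */
	}

	return ret;
}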
@@ -720,7 +732,7 @@
 	int ret = 0;
 
 	topology = sde_connector_get_topology_name(drm_conn);
-	if (topology == SDE_RM_TOPOLOGY_UNKNOWN) {
+	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
 		return -EINVAL;
 	}
@@ -729,16 +741,15 @@
 	SDE_EVT32(DRMID(&sde_enc->base));
 
 	switch (topology) {
-	case SDE_RM_TOPOLOGY_SINGLEPIPE:
+	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPEMERGE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
 		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
 		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_PPSPLIT:
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
 				topology);
@@ -1098,6 +1109,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_connector *sde_conn = NULL;
 	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	int i = 0, ret;
 
@@ -1127,6 +1139,17 @@
 		return;
 	}
 
+	sde_conn = to_sde_connector(conn);
+	if (sde_conn) {
+		ret = sde_conn->ops.get_topology(adj_mode, &sde_enc->topology,
+				sde_kms->catalog->max_mixer_width);
+		if (ret) {
+			SDE_ERROR_ENC(sde_enc,
+				"invalid topology for the mode\n");
+			return;
+		}
+	}
+
 	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
 	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
 			conn->state, false);
@@ -1167,6 +1190,8 @@
 				phys->ops.mode_set(phys, mode, adj_mode);
 		}
 	}
+
+	sde_enc->mode_set_complete = true;
 }
 
 static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1174,7 +1199,10 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct sde_hw_mdp *hw_mdptop;
+	int i = 0;
 	int ret = 0;
+	struct sde_watchdog_te_status te_cfg = { 0 };
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid parameters\n");
@@ -1189,6 +1217,14 @@
 	}
 
 	sde_kms = to_sde_kms(priv->kms);
+	hw_mdptop = sde_kms->hw_mdp;
+
+	if (!hw_mdptop) {
+		SDE_ERROR("invalid mdptop\n");
+		return;
+	}
+
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
 		return;
@@ -1205,6 +1241,16 @@
 		if (ret)
 			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
 	}
+
+	if (hw_mdptop->ops.setup_vsync_sel) {
+		for (i = 0; i < sde_enc->num_phys_encs; i++)
+			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+		te_cfg.pp_count = sde_enc->num_phys_encs;
+		te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
+		hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
+				sde_enc->disp_info.is_te_using_watchdog_timer);
+	}
 }
 
 void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
@@ -1659,7 +1705,7 @@
 	/* don't perform flush/start operations for slave encoders */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_UNKNOWN;
+		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
 
 		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
 			continue;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index c5ddee6..b756313 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -33,15 +33,16 @@
  * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
  * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
  * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
- * @needs_dsc:	Request to allocate DSC block
- * @display_num_of_h_tiles:
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ * @topology:   Topology of the display
  */
 struct sde_encoder_hw_resources {
 	enum sde_intf_mode intfs[INTF_MAX];
 	enum sde_intf_mode wbs[WB_MAX];
 	bool needs_cdm;
-	bool needs_dsc;
 	u32 display_num_of_h_tiles;
+	struct msm_display_topology topology;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index a3b112d..3d6dc32 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -432,8 +432,7 @@
 
 	topology = sde_connector_get_topology_name(phys_enc->connector);
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE &&
-			phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_NONE)
+			topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index cf54611..bd212e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -34,6 +34,17 @@
 #define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
 #define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
 
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
 #define DCE_SEL                           0x450
 
 static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
@@ -192,6 +203,39 @@
 	status->wb[WB_3] = 0;
 }
 
+static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
+		struct sde_watchdog_te_status *cfg, bool watchdog_te)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg = 0;
+	int i = 0;
+	u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
+
+	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
+	for (i = 0; i < cfg->pp_count; i++) {
+		if (watchdog_te)
+			reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+		else
+			reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+	}
+
+	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	if (watchdog_te) {
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+				CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
+		reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+	}
+}
+
 static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
 		struct sde_danger_safe_status *status)
 {
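CALCULATE_WD_LOAD_VALUE() above converts a frame rate into a reload count for the MDP watchdog timer, which ticks at the 19.2 MHz XO clock divided by MDP_TICK_COUNT (16). A quick standalone check of that arithmetic for 60 fps and 30 fps panels:

#include <stdint.h>
#include <stdio.h>

/* Same constants and formula as the MDP watchdog hunk above. */
#define MDP_TICK_COUNT  16
#define XO_CLK_RATE     19200	/* kHz */
#define MS_TICKS_IN_SEC 1000

#define CALCULATE_WD_LOAD_VALUE(fps) \
	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))

int main(void)
{
	/* 19.2 MHz / 16 = 1.2 MHz tick rate; 1.2 MHz / 60 Hz = 20000 ticks */
	printf("60 fps -> load value %u\n",
	       (unsigned int)CALCULATE_WD_LOAD_VALUE(60));
	/* 1.2 MHz / 30 Hz = 40000 ticks, i.e. one TE every 33.3 ms */
	printf("30 fps -> load value %u\n",
	       (unsigned int)CALCULATE_WD_LOAD_VALUE(30));
	return 0;
}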
@@ -261,6 +305,7 @@
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
 	ops->get_safe_status = sde_hw_get_safe_status;
 	ops->setup_dce = sde_hw_setup_dce;
 	ops->reset_ubwc = sde_hw_reset_ubwc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 7511358..9cb4494 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -77,6 +77,18 @@
 };
 
 /**
+ * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: array of active ping pong block indices
+ */
+struct sde_watchdog_te_status {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[];
+};
+
+/**
  * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
  * Assumption is these functions will be called after clocks are enabled.
  * @setup_split_pipe : Programs the pipe control registers
@@ -142,6 +154,15 @@
 			struct sde_danger_safe_status *status);
 
 	/**
+	 * setup_vsync_sel - program the vsync source and watchdog timer
+	 * @mdp: mdp top context driver
+	 * @cfg: watchdog timer configuration
+	 * @watchdog_te: watchdog timer enable
+	 */
+	void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
+			struct sde_watchdog_te_status *cfg, bool watchdog_te);
+
+	/**
 	 * get_safe_status - get safe status
 	 * @mdp: mdp top context driver
 	 * @status: Pointer to danger safe status
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 4a5479d..b68d736 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -567,7 +567,8 @@
 		.set_backlight = dsi_display_set_backlight,
 		.soft_reset   = dsi_display_soft_reset,
 		.pre_kickoff  = dsi_conn_pre_kickoff,
-		.clk_ctrl = dsi_display_clk_ctrl
+		.clk_ctrl = dsi_display_clk_ctrl,
+		.get_topology = dsi_conn_get_topology
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -575,7 +576,8 @@
 		.get_modes =    sde_wb_connector_get_modes,
 		.set_property = sde_wb_connector_set_property,
 		.get_info =     sde_wb_get_info,
-		.soft_reset =   NULL
+		.soft_reset =   NULL,
+		.get_topology = sde_wb_get_topology
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 66318b3..427a93b 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -32,22 +32,40 @@
 #define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
 #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
 #define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
-#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
-#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+struct sde_rm_topology_def {
+	enum sde_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+static const struct sde_rm_topology_def g_top_table[] = {
+	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
+};
 
 /**
  * struct sde_rm_requirements - Reservation requirements parameter bundle
- * @top_name:	DRM<->HW topology use case user is trying to enable
- * @dspp:	Whether the user requires a DSPP
- * @num_lm:	Number of layer mixers needed in the use case
- * @hw_res:	Hardware resources required as reported by the encoders
+ * @top_ctrl:  topology control preference from kernel client
+ * @topology:  selected topology for the display
+ * @hw_res:    Hardware resources required as reported by the encoders
  */
 struct sde_rm_requirements {
-	enum sde_rm_topology_name top_name;
 	uint64_t top_ctrl;
-	int num_lm;
-	int num_ctl;
-	bool needs_split_display;
+	const struct sde_rm_topology_def *topology;
 	struct sde_encoder_hw_resources hw_res;
 };
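g_top_table above describes each topology by its layer-mixer, compression-encoder, and interface counts, and RM_IS_TOPOLOGY_MATCH() selects the entry whose counts equal what the encoder reported in hw_res.topology. A reduced sketch of that lookup, with illustrative types and a table trimmed to three entries (the ids follow the sde_rm_topology_name enum order):

#include <stdio.h>

/*
 * Reduced sketch of the table lookup in _sde_rm_populate_requirements()
 * above; struct and function names here are illustrative.
 */
struct ex_disp_topology { int num_lm, num_enc, num_intf; };

struct ex_topology_def {
	int top_name;			/* enum sde_rm_topology_name value */
	int num_lm, num_comp_enc, num_intf;
};

static const struct ex_topology_def ex_table[] = {
	{ 1, 1, 0, 1 },			/* SINGLEPIPE */
	{ 4, 2, 2, 2 },			/* DUALPIPE_DSC */
	{ 7, 2, 2, 1 },			/* DUALPIPE_DSCMERGE */
};

static const struct ex_topology_def *
ex_find(const struct ex_disp_topology *req)
{
	unsigned int i;

	for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].num_lm == req->num_lm &&
		    ex_table[i].num_comp_enc == req->num_enc &&
		    ex_table[i].num_intf == req->num_intf)
			return &ex_table[i];

	return NULL;			/* caller reports -EINVAL */
}

int main(void)
{
	/* encoder reports 2 LMs, 2 compression encoders, 1 interface */
	struct ex_disp_topology req = { 2, 2, 1 };
	const struct ex_topology_def *def = ex_find(&req);

	/* prints 7, i.e. DUALPIPE_DSCMERGE in the enum order */
	printf("matched topology %d\n", def ? def->top_name : -1);
	return 0;
}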
 
@@ -607,7 +625,7 @@
 	}
 
 	pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
-	if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
 			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
 		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
 		*dspp = NULL;
@@ -630,14 +648,15 @@
 	int lm_count = 0;
 	int i, rc = 0;
 
-	if (!reqs->num_lm) {
-		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+	if (!reqs->topology->num_lm) {
+		SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
 		return -EINVAL;
 	}
 
 	/* Find a primary mixer */
 	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
-	while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+	while (lm_count != reqs->topology->num_lm &&
+			sde_rm_get_hw(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
 		memset(&pp, 0, sizeof(pp));
@@ -655,7 +674,8 @@
 		/* Valid primary mixer found, find matching peers */
 		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
 
-		while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+		while (lm_count != reqs->topology->num_lm &&
+				sde_rm_get_hw(rm, &iter_j)) {
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
@@ -669,7 +689,7 @@
 		}
 	}
 
-	if (lm_count != reqs->num_lm) {
+	if (lm_count != reqs->topology->num_lm) {
 		SDE_DEBUG("unable to find appropriate mixers\n");
 		return -ENAVAIL;
 	}
@@ -687,7 +707,7 @@
 				dspp[i] ? dspp[i]->id : 0);
 	}
 
-	if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
 		/* reserve a free PINGPONG_SLAVE block */
 		rc = -ENAVAIL;
 		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
@@ -713,7 +733,7 @@
 static int _sde_rm_reserve_ctls(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter;
@@ -735,23 +755,23 @@
 
 		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
 
-		if (reqs->needs_split_display != has_split_display)
+		if (top->needs_split_display != has_split_display)
 			continue;
 
-		if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+		if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
 			continue;
 
 		ctls[i] = iter.blk;
 		SDE_DEBUG("ctl %d match\n", iter.blk->id);
 
-		if (++i == reqs->num_ctl)
+		if (++i == top->num_ctl)
 			break;
 	}
 
-	if (i != reqs->num_ctl)
+	if (i != top->num_ctl)
 		return -ENAVAIL;
 
-	for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
 		ctls[i]->rsvp_nxt = rsvp;
 		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
 	}
@@ -762,13 +782,13 @@
 static int _sde_rm_reserve_dsc(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_iter iter;
 	int alloc_count = 0;
-	int num_dsc_enc = reqs->num_lm;
+	int num_dsc_enc = top->num_lm;
 
-	if (!reqs->hw_res.needs_dsc)
+	if (!top->num_comp_enc)
 		return 0;
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
@@ -912,11 +932,12 @@
 		struct sde_rm_requirements *reqs)
 {
 	int ret;
+	struct sde_rm_topology_def topology;
 
 	/* Create reservation info, tag reserved blocks with it as we go */
 	rsvp->seq = ++rm->rsvp_next_seq;
 	rsvp->enc_id = enc->base.id;
-	rsvp->topology = reqs->top_name;
+	rsvp->topology = reqs->topology->top_name;
 	list_add_tail(&rsvp->list, &rm->rsvps);
 
 	/*
@@ -941,10 +962,11 @@
 	 * - Check mixers without Split Display
 	 * - Only then allow to grab from CTLs with split display capability
 	 */
-	_sde_rm_reserve_ctls(rm, rsvp, reqs);
-	if (ret && !reqs->needs_split_display) {
-		reqs->needs_split_display = true;
-		_sde_rm_reserve_ctls(rm, rsvp, reqs);
+	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		ret = _sde_rm_reserve_ctls(rm, rsvp, &topology);
 	}
 	if (ret) {
 		SDE_ERROR("unable to find appropriate CTL\n");
@@ -956,7 +978,7 @@
 	if (ret)
 		return ret;
 
-	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs);
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology);
 	if (ret)
 		return ret;
 
@@ -971,37 +993,7 @@
 		struct sde_rm_requirements *reqs)
 {
 	const struct drm_display_mode *mode = &crtc_state->mode;
-
-	/**
-	 * DRM<->HW Topologies
-	 *
-	 * Name: SINGLEPIPE
-	 * Description: 1 LM, 1 PP, 1 INTF
-	 * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
-	 *
-	 * Name: DUALPIPE
-	 * Description: 2 LM, 2 PP, 2 INTF
-	 * Condition: 1 DRM Encoder w/ 2 Display Tiles
-	 *
-	 * Name: PPSPLIT
-	 * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 2 Display Tiles
-	 *	topology_control & SDE_TOPREQ_PPSPLIT
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width >= layer_mixer.max_width
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width <= layer_mixer.max_width
-	 *	topology_control & SDE_TOPREQ_FORCE_TILING
-	 */
+	int i;
 
 	memset(reqs, 0, sizeof(*reqs));
 
@@ -1009,63 +1001,32 @@
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
 	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
 
-	/* DSC blocks are hardwired for control path 0 and 1 */
-	if (reqs->hw_res.needs_dsc)
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-
-	/* Base assumption is LMs = h_tiles, conditions below may override */
-	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
-
-	if (reqs->num_lm == 2) {
-		if (RM_RQ_PPSPLIT(reqs)) {
-			/* user requests serving dual display with 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
-			reqs->num_lm = 1;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = true;
-		} else {
-			/* dual display, serve with 2 lms */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
-			reqs->num_ctl = 2;
-			reqs->needs_split_display = true;
+	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					reqs->hw_res.topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
 		}
+	}
 
-	} else if (reqs->num_lm == 1) {
-		if (mode->hdisplay > rm->lm_max_width) {
-			/* wide display, must split across 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else if (RM_RQ_FORCE_TILING(reqs)) {
-			/* thin display, but user requests 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else {
-			/* thin display, serve with only 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		}
-
-	} else {
-		/* Currently no configurations with # LM > 2 */
-		SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+	if (!reqs->topology) {
+		SDE_ERROR("invalid topology for the display\n");
 		return -EINVAL;
 	}
 
-	SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+	/* DSC blocks are hardwired for control path 0 and 1 */
+	if (reqs->topology->num_comp_enc)
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+
+	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
 			reqs->hw_res.display_num_of_h_tiles);
-	SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
-			mode->hdisplay, rm->lm_max_width);
-	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
-			reqs->num_ctl, reqs->top_name);
-	SDE_DEBUG("num_lm %d topology_name %d\n", reqs->num_lm,
-			reqs->top_name);
-	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
-			reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+			reqs->topology->num_lm, reqs->topology->num_ctl,
+			reqs->topology->top_name,
+			reqs->topology->needs_split_display);
+	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->topology->num_lm,
+			reqs->top_ctrl, reqs->topology->top_name,
+			reqs->topology->num_ctl);
 
 	return 0;
 }
@@ -1189,7 +1150,7 @@
 				sde_connector_get_propinfo(conn),
 				sde_connector_get_property_values(conn->state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 }
 
@@ -1233,17 +1194,6 @@
 	return ret;
 }
 
-int sde_rm_check_property_topctl(uint64_t val)
-{
-	if ((BIT(SDE_RM_TOPCTL_FORCE_TILING) & val) &&
-			(BIT(SDE_RM_TOPCTL_PPSPLIT) & val)) {
-		SDE_ERROR("ppsplit & force_tiling are incompatible\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 int sde_rm_reserve(
 		struct sde_rm *rm,
 		struct drm_encoder *enc,
@@ -1310,7 +1260,7 @@
 						conn_state->connector),
 				sde_connector_get_property_values(conn_state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 4127bc2..059952a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -22,18 +22,27 @@
 
 /**
  * enum sde_rm_topology_name - HW resource use case in use by connector
- * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
- * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:       1 LM, 1 DSC, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSC:         2 LM, 2 DSC, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC: 2 LM, 2 PP, 3DMux, 1 DSC, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:    2 LM, 2 PP, 2 DSC Merge, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT:              1 LM, 2 PPs, 2 INTF/WB
  */
 enum sde_rm_topology_name {
-	SDE_RM_TOPOLOGY_UNKNOWN = 0,
+	SDE_RM_TOPOLOGY_NONE = 0,
 	SDE_RM_TOPOLOGY_SINGLEPIPE,
+	SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,
 	SDE_RM_TOPOLOGY_DUALPIPE,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,
 	SDE_RM_TOPOLOGY_PPSPLIT,
-	SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+	SDE_RM_TOPOLOGY_MAX,
 };
 
 /**
@@ -47,18 +56,11 @@
  *                               Normal behavior would not impact the
  *                               reservation list during the AtomicTest phase.
  * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
- * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
- *                              mixers, despite width fitting within capability
- *                              of a single layer mixer.
- * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
- *                         configuration instead of dual pipe.
  */
 enum sde_rm_topology_control {
 	SDE_RM_TOPCTL_RESERVE_LOCK,
 	SDE_RM_TOPCTL_RESERVE_CLEAR,
 	SDE_RM_TOPCTL_DSPP,
-	SDE_RM_TOPCTL_FORCE_TILING,
-	SDE_RM_TOPCTL_PPSPLIT,
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 68d7653..ebaa1a9 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1403,7 +1403,7 @@
 	}
 
 	/* GPU comes up in secured mode, make it unsecured by default */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+	if (adreno_dev->zap_loaded)
 		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
 	else
 		adreno_writereg(adreno_dev,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 530529f..7c76580 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -410,6 +410,7 @@
  * @gpu_llc_slice_enable: To enable the GPU system cache slice or not
  * @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
  * @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
+ * @zap_loaded: Used to track if zap was successfully loaded or not
  */
 struct adreno_device {
 	struct kgsl_device dev;    /* Must be first field in this struct */
@@ -473,6 +474,7 @@
 	bool gpu_llc_slice_enable;
 	void *gpuhtw_llc_slice;
 	bool gpuhtw_llc_slice_enable;
+	unsigned int zap_loaded;
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 1e95e38..6c8b677 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -30,7 +30,6 @@
 #include "kgsl_trace.h"
 #include "adreno_a5xx_packets.h"
 
-static int zap_ucode_loaded;
 static int critical_packet_constructed;
 
 static struct kgsl_memdesc crit_pkts;
@@ -2179,7 +2178,7 @@
 	 * appropriate register,
 	 * skip if retention is supported for the CPZ register
 	 */
-	if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
+	if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
 		ADRENO_CPZ_RETENTION))) {
 		int ret;
 		struct scm_desc desc = {0};
@@ -2197,14 +2196,13 @@
 	}
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		ptr = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(ptr))
 			return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
-
-		zap_ucode_loaded = 1;
+		adreno_dev->zap_loaded = 1;
 	}
 
 	return 0;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 585beb9..dde10ee 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -490,7 +490,7 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	uint64_t gpuaddr;
-	static void *zap;
+	void *zap;
 	int ret = 0;
 
 	gpuaddr = fw->memdesc.gpuaddr;
@@ -500,14 +500,15 @@
 				upper_32_bits(gpuaddr));
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !zap) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		zap = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(zap)) {
 			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
 			zap = NULL;
-		}
+		} else
+			adreno_dev->zap_loaded = 1;
 	}
 
 	return ret;
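The adreno changes above replace the file-scoped zap_ucode_loaded flag and the static zap pointer with a per-device zap_loaded field, so the "already loaded" state follows the device rather than the translation unit. A minimal load-once sketch of that guard, using hypothetical stand-ins for the device struct and subsystem_get():

/*
 * Per-device load-once guard, mirroring the adreno_dev->zap_loaded field
 * introduced above. ex_device and ex_request_zap() are hypothetical
 * stand-ins for the driver's structures and subsystem_get().
 */
struct ex_device {
	const char *zap_name;
	unsigned int zap_loaded;
};

static void *ex_request_zap(const char *name)
{
	(void)name;
	return (void *)1;	/* pretend the zap shader loaded */
}

static int ex_load_zap_once(struct ex_device *dev)
{
	if (!dev->zap_name || dev->zap_loaded)
		return 0;	/* nothing to load, or already loaded */

	if (!ex_request_zap(dev->zap_name))
		return -1;

	dev->zap_loaded = 1;	/* state stays with the device, not the file */
	return 0;
}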
@@ -1255,6 +1256,8 @@
 				0xFFFFFFFF))
 			goto error_rsc;
 
+		kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
 		/* Turn on the HM and SPTP head switches */
 		ret = a6xx_hm_sptprac_control(device, true);
 	}
@@ -1293,6 +1296,7 @@
 			&val);
 	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
 			&val);
+	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
 
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 01ecb01..decbff3 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -180,6 +180,7 @@
 	unsigned int statetype;
 	const unsigned int *regs;
 	unsigned int num_sets;
+	unsigned int offset;
 } a6xx_non_ctx_dbgahb[] = {
 	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
 		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
@@ -735,10 +736,8 @@
 	return data_size + sizeof(*header);
 }
 
-
-
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
-				size_t remain, void *priv)
+static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
+				u8 *buf, size_t remain, void *priv)
 {
 	struct kgsl_snapshot_regs *header =
 				(struct kgsl_snapshot_regs *)buf;
@@ -783,6 +782,57 @@
 	return (count * 8) + sizeof(*header);
 }
 
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header =
+				(struct kgsl_snapshot_regs *)buf;
+	struct a6xx_non_ctx_dbgahb_registers *regs =
+				(struct a6xx_non_ctx_dbgahb_registers *)priv;
+	unsigned int count = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int i, k;
+	unsigned int *src;
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
+				regs);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
+
+	for (i = 0; i < regs->num_sets; i++) {
+		unsigned int start;
+		unsigned int end;
+
+		start = regs->regs[2 * i];
+		end = regs->regs[(2 * i) + 1];
+
+		if (remain < (end - start + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
+
+		remain -= ((end - start) + 1) * 8;
+
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
+		}
+	}
+out:
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
 static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
 				struct kgsl_snapshot *snapshot)
 {
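The non-context DBGAHB snapshot function added above emits a register header followed by (address, value) pairs, so every dumped register costs 8 bytes and the section size is count * 8 plus the header; the remain checks subtract the same amounts before writing. A small standalone check of that bookkeeping for a hypothetical 64-register range:

#include <stddef.h>
#include <stdio.h>

/*
 * Stand-in for struct kgsl_snapshot_regs; the exact header layout does
 * not matter for the data arithmetic below.
 */
struct ex_snapshot_regs_header {
	unsigned int count;
	unsigned int pad;
};

int main(void)
{
	/* hypothetical register range 0x2F800..0x2F83F: 64 registers */
	unsigned int start = 0x2F800, end = 0x2F83F;
	unsigned int count = end - start + 1;
	size_t data_bytes = (size_t)count * 8;	/* 4-byte addr + 4-byte value */
	size_t section = data_bytes + sizeof(struct ex_snapshot_regs_header);

	/* 64 registers -> 512 bytes of (address, value) pairs + header */
	printf("registers=%u data=%zu section=%zu bytes\n",
	       count, data_bytes, section);
	return 0;
}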
@@ -1491,6 +1541,40 @@
 	return qwords;
 }
 
+static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		regs->offset = *offset;
+
+		/* Program the aperture */
+		ptr[qwords++] = (regs->statetype & 0xff) << 8;
+		ptr[qwords++] =	(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+					(1 << 21) | 1;
+
+		for (k = 0; k < regs->num_sets; k++) {
+			unsigned int start = regs->regs[2 * k];
+
+			count = REG_PAIR_COUNT(regs->regs, k);
+			ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+			ptr[qwords++] =
+				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+					start - regs->regbase / 4) << 44)) |
+							count;
+
+			*offset += count * sizeof(unsigned int);
+		}
+	}
+	return qwords;
+}
+
 void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1578,6 +1662,26 @@
 		}
 	}
 
+	/*
+	 * Calculate the script and data size for non context debug
+	 * AHB registers
+	 */
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		/* 16 bytes for programming the aperture */
+		script_size += 16;
+
+		/* Reading each pair of registers takes 16 bytes */
+		script_size += 16 * regs->num_sets;
+
+		/* A dword per register read from the cluster list */
+		for (k = 0; k < regs->num_sets; k++)
+			data_size += REG_PAIR_COUNT(regs->regs, k) *
+				sizeof(unsigned int);
+	}
+
 	/* Now allocate the script and data buffers */
 
 	/* The script buffers needs 2 extra qwords on the end */
@@ -1619,6 +1723,8 @@
 
 	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
 
+	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
+
 	*ptr++ = 0;
 	*ptr++ = 0;
 }
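The sizing loop added to a6xx_crashdump_init() above budgets 16 bytes of script to program the aperture for each non-context DBGAHB block, 16 more bytes of script per register range in the block, and one dword of dump data per register read. A quick worked example under assumed counts (one block, three ranges, 24 registers total):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical non-context DBGAHB block: three start/end register
	 * ranges covering 24 registers in total.
	 */
	unsigned int num_sets = 3, total_regs = 24;

	/* 16 bytes program the aperture, 16 bytes of script per range */
	size_t script_size = 16 + (size_t)16 * num_sets;
	/* one dword of dump data per register read */
	size_t data_size = (size_t)total_regs * sizeof(unsigned int);

	/* prints script=64 data=96 */
	printf("script=%zu bytes, data=%zu bytes\n", script_size, data_size);
	return 0;
}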
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 19de267..c05c069 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -3,3 +3,6 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/Makefile b/drivers/media/platform/msm/camera/cam_cdm/Makefile
new file mode 100644
index 0000000..ad4ec04
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
+				cam_cdm_core_common.o cam_cdm_virtual_core.o \
+				cam_cdm_hw_core.o
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
new file mode 100644
index 0000000..fc7a493
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+
+#ifdef CONFIG_CAM_CDM_DBG
+#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#ifdef CONFIG_CAM_CDM_DUMP_DBG
+#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
+#define CAM_SW_CDM_INDEX                  0
+#define CAM_CDM_INFLIGHT_WORKS            5
+#define CAM_CDM_HW_RESET_TIMEOUT          3000
+
+#define CAM_CDM_HW_ID_MASK      0xF
+#define CAM_CDM_HW_ID_SHIFT     0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+	CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+	CAM_REG_ATTR_READ,
+	CAM_REG_ATTR_WRITE,
+	CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+	CAM_CDM_HW_INTF_CMD_ACQUIRE,
+	CAM_CDM_HW_INTF_CMD_RELEASE,
+	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+	CAM_CDM_HW_INTF_CMD_RESET_HW,
+	CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+	/*cfg_offsets 0*/
+	CDM_CFG_HW_VERSION,
+	CDM_CFG_TITAN_VERSION,
+	CDM_CFG_RST_CMD,
+	CDM_CFG_CGC_CFG,
+	CDM_CFG_CORE_CFG,
+	CDM_CFG_CORE_EN,
+	CDM_CFG_FE_CFG,
+	/*irq_offsets 7*/
+	CDM_IRQ_MASK,
+	CDM_IRQ_CLEAR,
+	CDM_IRQ_CLEAR_CMD,
+	CDM_IRQ_SET,
+	CDM_IRQ_SET_CMD,
+	CDM_IRQ_STATUS,
+	CDM_IRQ_USR_DATA,
+	/*BL FIFO Registers 14*/
+	CDM_BL_FIFO_BASE_REG,
+	CDM_BL_FIFO_LEN_REG,
+	CDM_BL_FIFO_STORE_REG,
+	CDM_BL_FIFO_CFG,
+	CDM_BL_FIFO_RB,
+	CDM_BL_FIFO_BASE_RB,
+	CDM_BL_FIFO_LEN_RB,
+	CDM_BL_FIFO_PENDING_REQ_RB,
+	/*CDM System Debug Registers 22*/
+	CDM_DBG_WAIT_STATUS,
+	CDM_DBG_SCRATCH_0_REG,
+	CDM_DBG_SCRATCH_1_REG,
+	CDM_DBG_SCRATCH_2_REG,
+	CDM_DBG_SCRATCH_3_REG,
+	CDM_DBG_SCRATCH_4_REG,
+	CDM_DBG_SCRATCH_5_REG,
+	CDM_DBG_SCRATCH_6_REG,
+	CDM_DBG_SCRATCH_7_REG,
+	CDM_DBG_LAST_AHB_ADDR,
+	CDM_DBG_LAST_AHB_DATA,
+	CDM_DBG_CORE_DBUG,
+	CDM_DBG_LAST_AHB_ERR_ADDR,
+	CDM_DBG_LAST_AHB_ERR_DATA,
+	CDM_DBG_CURRENT_BL_BASE,
+	CDM_DBG_CURRENT_BL_LEN,
+	CDM_DBG_CURRENT_USED_AHB_BASE,
+	CDM_DBG_DEBUG_STATUS,
+	/*FE Bus MISR Registers 40*/
+	CDM_BUS_MISR_CFG_0,
+	CDM_BUS_MISR_CFG_1,
+	CDM_BUS_MISR_RD_VAL,
+	/*Performance Counter registers 43*/
+	CDM_PERF_MON_CTRL,
+	CDM_PERF_MON_0,
+	CDM_PERF_MON_1,
+	CDM_PERF_MON_2,
+	/*Spare registers 47*/
+	CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+	uint32_t offset;
+	enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+	uint32_t first_offset;
+	uint32_t last_offset;
+	uint32_t reg_count;
+	const struct cam_cdm_reg_offset *offsets;
+	uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - Bit fields for CDM flags used */
+enum cam_cdm_flags {
+	CAM_CDM_FLAG_SHARED_CDM,
+	CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+	CAM_VIRTUAL_CDM,
+	CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for CDM memory base indices */
+enum cam_cdm_mem_base_index {
+	CAM_HW_CDM_BASE_INDEX,
+	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+	struct cam_cdm_acquire_data data;
+	void __iomem  *changebase_addr;
+	uint32_t stream_on;
+	uint32_t refcount;
+	struct mutex lock;
+	uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+	CAM_HW_CDM_BL_CB_CLIENT = 1,
+	CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+	uint8_t bl_tag;
+	enum cam_cdm_bl_cb_type request_type;
+	uint32_t client_hdl;
+	void *userdata;
+	uint32_t cookie;
+	struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+	uint32_t handle;
+	struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
+struct cam_cdm_hw_mem {
+	int32_t handle;
+	uint32_t vaddr;
+	uint64_t kmdvaddr;
+	size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+	uint32_t index;
+	char name[128];
+	enum cam_cdm_id id;
+	enum cam_cdm_flags flags;
+	struct completion reset_complete;
+	struct completion bl_complete;
+	struct workqueue_struct *work_queue;
+	struct list_head bl_request_list;
+	struct cam_hw_version version;
+	uint32_t hw_version;
+	uint32_t hw_family_version;
+	struct cam_iommu_handle iommu_hdl;
+	struct cam_cdm_reg_offset_table *offset_tbl;
+	struct cam_cdm_utils_ops *ops;
+	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+	uint8_t bl_tag;
+	atomic_t error;
+	struct cam_cdm_hw_mem gen_irq;
+	uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+	bool dt_cdm_shared;
+	uint32_t dt_num_supported_clients;
+	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+	struct mutex lock;
+	uint32_t refcount;
+	struct cam_hw_intf *device;
+	struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+	bool probe_done;
+	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+	uint32_t cdm_count;
+	uint32_t dt_supported_hw_cdm;
+	int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index);
+
+#endif /* _CAM_CDM_H_ */
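cam_cdm.h above packs a CDM hardware index and a client index into one handle: bits [8:5] carry the HW index (CAM_CDM_HW_ID_MASK shifted by CAM_CDM_HW_ID_SHIFT) and bits [4:0] the client index (CAM_CDM_CLIENTS_ID_MASK). A standalone round trip through equivalent macros, assuming HW index 2 and client slot 9:

#include <stdint.h>
#include <stdio.h>

/* Same handle layout as the CAM_CDM_* macros above. */
#define EX_HW_ID_MASK      0xF
#define EX_HW_ID_SHIFT     0x5
#define EX_CLIENTS_ID_MASK 0x1F

#define EX_CREATE_HANDLE(hw_idx, client_idx) \
	((((hw_idx) & EX_HW_ID_MASK) << EX_HW_ID_SHIFT) | \
	 ((client_idx) & EX_CLIENTS_ID_MASK))
#define EX_GET_HW_IDX(x)     (((x) >> EX_HW_ID_SHIFT) & EX_HW_ID_MASK)
#define EX_GET_CLIENT_IDX(x) ((x) & EX_CLIENTS_ID_MASK)

int main(void)
{
	/* hypothetical: CDM HW index 2, client slot 9 */
	uint32_t handle = EX_CREATE_HANDLE(2, 9);

	/* prints handle=0x49 hw=2 client=9 */
	printf("handle=0x%x hw=%u client=%u\n",
	       (unsigned int)handle,
	       (unsigned int)EX_GET_HW_IDX(handle),
	       (unsigned int)EX_GET_CLIENT_IDX(handle));
	return 0;
}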
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
new file mode 100644
index 0000000..341406a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -0,0 +1,547 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client get refcount=%d\n",
+		client->refcount);
+	client->refcount++;
+	mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client put refcount=%d\n",
+		client->refcount);
+	if (client->refcount > 0) {
+		client->refcount--;
+	} else {
+		pr_err("Refcount put when zero\n");
+		WARN_ON(1);
+	}
+	mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version)
+{
+	switch (ver) {
+	case CAM_CDM170_VERSION:
+		cam_version->major    = (ver & 0xF0000000);
+		cam_version->minor    = (ver & 0xFFF0000);
+		cam_version->incr     = (ver & 0xFFFF);
+		cam_version->reserved = 0;
+		return true;
+	default:
+		pr_err("CDM Version=%x not supported in util\n", ver);
+	break;
+	}
+	return false;
+}
+
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+{
+	pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+		evt_data);
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+	if (by_cam_version == false) {
+		switch (ver) {
+		case CAM_CDM170_VERSION:
+			return &CDM170_ops;
+		default:
+			pr_err("CDM Version=%x not supported in util\n", ver);
+		}
+	} else if (cam_version) {
+		if ((cam_version->major == 1) && (cam_version->minor == 0) &&
+			(cam_version->incr == 0))
+			return &CDM170_ops;
+		pr_err("cam_hw_version=%x:%x:%x not supported\n",
+			cam_version->major, cam_version->minor,
+			cam_version->incr);
+	}
+
+	return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+
+	list_for_each_entry(node, bl_list, entry) {
+		if (node->bl_tag == tag)
+			return node;
+	}
+	pr_err("Could not find the bl request for tag=%d\n", tag);
+
+	return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core;
+
+	if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+		(sizeof(struct cam_iommu_handle) == arg_size)) {
+		cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+		*((struct cam_iommu_handle *)get_hw_cap_args) =
+			cdm_core->iommu_hdl;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+	int i;
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (hw->clients[i] == NULL) {
+			CDM_CDBG("Found client slot %d\n", i);
+			return i;
+		}
+	}
+	pr_err("No more client slots\n");
+
+	return -EBUSY;
+}
+
+
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data)
+{
+	int i;
+	struct cam_cdm *core = NULL;
+	struct cam_cdm_client *client = NULL;
+
+	if (!cdm_hw) {
+		pr_err("CDM Notify called with NULL hw info\n");
+		return;
+	}
+	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		int client_idx;
+		struct cam_cdm_bl_cb_request_entry *node =
+			(struct cam_cdm_bl_cb_request_entry *)data;
+
+		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+		client = core->clients[client_idx];
+		if ((!client) || (client->handle != node->client_hdl)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				node->client_hdl);
+			return;
+		}
+		cam_cdm_get_client_refcount(client);
+		if (client->data.cam_cdm_callback) {
+			CDM_CDBG("Calling client=%s cb cookie=%d\n",
+				client->data.identifier, node->cookie);
+			client->data.cam_cdm_callback(node->client_hdl,
+				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+				node->cookie);
+			CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+		} else {
+			pr_err("No cb registered for client hdl=%x\n",
+				node->client_hdl);
+		}
+		cam_cdm_put_client_refcount(client);
+		return;
+	}
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (core->clients[i] != NULL) {
+			client = core->clients[i];
+			mutex_lock(&client->lock);
+			CDM_CDBG("Found client slot %d\n", i);
+			if (client->data.cam_cdm_callback) {
+				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+					unsigned long iova =
+						(unsigned long)data;
+
+					client->data.cam_cdm_callback(
+						client->handle,
+						client->data.userdata,
+						CAM_CDM_CB_STATUS_PAGEFAULT,
+						(iova & 0xFFFFFFFF));
+				}
+			} else {
+				pr_err("No cb registered for client hdl=%x\n",
+					client->handle);
+			}
+			mutex_unlock(&client->lock);
+		}
+	}
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+	void *start_args, bool operation)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *core = NULL;
+	int rc = -1;
+	int client_idx;
+	struct cam_cdm_client *client;
+	uint32_t *handle = start_args;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		pr_err("client id given handle=%x invalid\n", *handle);
+		cam_cdm_put_client_refcount(client);
+		return -EINVAL;
+	}
+	if (operation == true) {
+		if (client->stream_on == true) {
+			pr_err("CDM client is already streamed ON\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	} else {
+		if (client->stream_on == false) {
+			pr_err("CDM client is already streamed OFF\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	}
+
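+	/*
+	 * The first stream-on across all clients powers the CDM up (CPAS vote
+	 * and, for a HW CDM, core init); the last stream-off powers it down.
+	 */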
+	mutex_lock(&cdm_hw->hw_mutex);
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			struct cam_ahb_vote ahb_vote;
+			struct cam_axi_vote axi_vote;
+
+			ahb_vote.type = CAM_VOTE_ABSOLUTE;
+			ahb_vote.vote.level = CAM_SVS_VOTE;
+			axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+			rc = cam_cpas_start(core->cpas_handle,
+				&ahb_vote, &axi_vote);
+			if (rc != 0) {
+				pr_err("CPAS start failed\n");
+				goto end;
+			}
+			CDM_CDBG("CDM init first time\n");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CDM_CDBG("Virtual CDM HW init first time\n");
+				rc = 0;
+			} else {
+				CDM_CDBG("CDM HW init first time\n");
+				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+				if (rc == 0) {
+					rc = cam_hw_cdm_alloc_genirq_mem(
+						hw_priv);
+					if (rc != 0) {
+						pr_err("Genirqalloc failed\n");
+						cam_hw_cdm_deinit(hw_priv,
+							NULL, 0);
+					}
+				} else {
+					pr_err("CDM HW init failed\n");
+				}
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					pr_err("CPAS stop failed\n");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CDM_CDBG("CDM HW already ON count=%d\n",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+			if (!cdm_hw->open_count) {
+				CDM_CDBG("CDM Deinit now\n");
+				if (core->id == CAM_CDM_VIRTUAL) {
+					CDM_CDBG("Virtual CDM HW Deinit\n");
+					rc = 0;
+				} else {
+					CDM_CDBG("CDM HW Deinit now\n");
+					rc = cam_hw_cdm_deinit(
+						hw_priv, NULL, 0);
+					if (cam_hw_cdm_release_genirq_mem(
+						hw_priv))
+						pr_err("Genirq release failed\n");
+				}
+				if (rc) {
+					pr_err("Deinit failed in streamoff\n");
+				} else {
+					client->stream_on = false;
+					rc = cam_cpas_stop(core->cpas_handle);
+					if (rc)
+						pr_err("CPAS stop failed\n");
+				}
+			} else {
+				client->stream_on = false;
+				CDM_CDBG("Client stream off success =%d\n",
+					cdm_hw->open_count);
+			}
+		} else {
+			CDM_CDBG("stream OFF CDM Invalid %d\n",
+				cdm_hw->open_count);
+			rc = -ENXIO;
+		}
+	}
+end:
+	cam_cdm_put_client_refcount(client);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+	return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+	return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+	uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_data = NULL;
+	struct cam_cdm *core = NULL;
+	int rc = -EINVAL;
+
+	if ((!hw_priv) || (!cmd_args) ||
+		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+		return rc;
+
+	soc_data = &cdm_hw->soc_info;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	switch (cmd) {
+	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+		struct cam_cdm_hw_intf_cmd_submit_bl *req;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+		if ((req->data->type < 0) ||
+			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+			pr_err("Invalid req bl cmd addr type=%d\n",
+				req->data->type);
+			break;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+		client = core->clients[idx];
+		if ((!client) || (req->handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				req->handle);
+			break;
+		}
+		cam_cdm_get_client_refcount(client);
+		if ((req->data->flag == true) &&
+			(!client->data.cam_cdm_callback)) {
+			pr_err("CDM request cb without registering cb\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (client->stream_on != true) {
+			pr_err("CDM client needs to be streamed ON first\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (core->id == CAM_CDM_VIRTUAL)
+			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+		else
+			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+		cam_cdm_put_client_refcount(client);
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+		struct cam_cdm_acquire_data *data;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+
+		mutex_lock(&cdm_hw->hw_mutex);
+		data = (struct cam_cdm_acquire_data *)cmd_args;
+		CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+			data->identifier, core->index);
+		idx = cam_cdm_find_free_client_slot(core);
+		if ((idx < 0) || (core->clients[idx])) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			pr_err("No free client slot for client=%s in hw idx=%d\n",
+				data->identifier, core->index);
+			break;
+		}
+		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+			GFP_KERNEL);
+		if (!core->clients[idx]) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			rc = -ENOMEM;
+			break;
+		}
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		mutex_init(&client->lock);
+		data->ops = core->ops;
+		if (core->id == CAM_CDM_VIRTUAL) {
+			data->cdm_version.major = 1;
+			data->cdm_version.minor = 0;
+			data->cdm_version.incr = 0;
+			data->cdm_version.reserved = 0;
+			data->ops = cam_cdm_get_ops(0,
+					&data->cdm_version, true);
+			if (!data->ops) {
+				mutex_destroy(&client->lock);
+				mutex_lock(&cdm_hw->hw_mutex);
+				kfree(core->clients[idx]);
+				core->clients[idx] = NULL;
+				mutex_unlock(&cdm_hw->hw_mutex);
+				rc = -1;
+				break;
+			}
+		} else {
+			data->cdm_version = core->version;
+		}
+
+		cam_cdm_get_client_refcount(client);
+		mutex_lock(&client->lock);
+		memcpy(&client->data, data,
+			sizeof(struct cam_cdm_acquire_data));
+		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+					core->index,
+					idx);
+		client->stream_on = false;
+		data->handle = client->handle;
+		CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+			data->identifier, core->index);
+		mutex_unlock(&client->lock);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RELEASE: {
+		uint32_t *handle = cmd_args;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+				cmd, arg_size, *handle);
+			return -EINVAL;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+		mutex_lock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		if ((!client) || (*handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		cam_cdm_put_client_refcount(client);
+		mutex_lock(&client->lock);
+		if (client->refcount != 0) {
+			pr_err("CDM Client refcount not zero %d",
+				client->refcount);
+			rc = -1;
+			mutex_unlock(&client->lock);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		core->clients[idx] = NULL;
+		mutex_unlock(&client->lock);
+		mutex_destroy(&client->lock);
+		kfree(client);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+		pr_err("CDM HW reset not supported for handle=%x\n",
+			*((uint32_t *)cmd_args));
+		break;
+	}
+	default:
+		pr_err("CDM HW intf command not valid =%d\n", cmd);
+		break;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
new file mode 100644
index 0000000..eb75aaa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+	bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+	uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version);
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
new file mode 100644
index 0000000..7f2c455
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -0,0 +1,1025 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <media/cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants */
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
+		return CAM_CDM_CPAS_0;
+
+	return CAM_CDM_MAX;
+}
+
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	int rc = 0;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+		pending_bl)) {
+		pr_err("Failed to read CDM pending BLs\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("dump core en=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	pr_err("dump scratch0=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	pr_err("dump scratch1=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	pr_err("dump scratch2=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	pr_err("dump scratch3=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	pr_err("dump scratch4=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	pr_err("dump scratch5=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	pr_err("dump scratch6=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	pr_err("dump scratch7=%x\n", dump_reg);
+
+}
+
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("CDM HW core status=%x\n", dump_reg);
+	/* First pause CDM */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		pr_err("AHB dump reglastdata=%x\n", dump_reg);
+	} else {
+		pr_err("CDM HW AHB dump not enabled\n");
+	}
+
+	if (core_dbg & 0x10000) {
+		int i;
+
+		pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			pr_err("BL(%d) len=%d tag=%d\n", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		pr_err("CDM HW BL FIFO readback not enabled\n");
+	}
+
+	pr_err("CDM HW default dump\n");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	pr_err("CDM HW core cfg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	pr_err("CDM HW irq status=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	pr_err("CDM HW irq set reg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	pr_err("CDM HW current BL base=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
+		(dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	pr_err("CDM HW current AHB base=%x\n", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+
+	/* Enable CDM back */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+}
+
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -1;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			pr_err("Failed to read CDM pending BLs\n");
+			rc = -1;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			pr_err("Invalid available slots %d:%d:%d\n",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		if (bl_count < (available_bl_slots - 1)) {
+			CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+				(available_bl_slots - 1), bl_count);
+			rc = bl_count;
+			break;
+		} else if ((available_bl_slots - 1) == 0) {
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				pr_err("CDM HW BL wait timed out\n");
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			CDM_CDBG("CDM HW is ready for data\n");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		pr_err("Failed to write CDM base to BL base\n");
+		return true;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
+		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
+		pr_err("Failed to write CDM BL len\n");
+		return true;
+	}
+	return false;
+}
+
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		pr_err("Failed to write CDM commit BL\n");
+		return true;
+	}
+	return false;
+}
+
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	if (core->bl_tag > 63) {
+		pr_err("bl_tag invalid =%d\n", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
+		core->bl_tag, req->data->cmd_arrary_count);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+			GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
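+	/*
+	 * Each tag owns a fixed slot in the preallocated genirq buffer.
+	 * Program a small "gen irq" command there and submit it as one more
+	 * BL, so the HW raises an inline IRQ carrying this tag once the
+	 * client's BLs ahead of it are consumed.
+	 */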
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		pr_err("Cannot commit the genirq BL with tag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+	}
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		pr_info("requested BL more than max size, cnt=%d max=%d\n",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	if (atomic_read(&core->error) != 0) {
+		pr_err("HW in error state, cannot trigger transactions now\n");
+		return rc;
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		pr_err("Cannot read the current BL depth\n");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
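+	/*
+	 * For every command: wait for BL FIFO space when needed, resolve the
+	 * mem handle or HW iova to a device address, then write base/len to
+	 * the FIFO and commit. A genirq BL is appended after the last command
+	 * when the client asked for a callback.
+	 */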
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (atomic_read(&core->error) != 0) {
+			pr_err("HW in error state cmd_count=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				pr_err("wait for bl fifo failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				pr_err("Hw bl hw_iova is invalid %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			pr_err("Only mem hdl/hw va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("Got the HW VA\n");
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				pr_err("Hw bl write failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for %d:%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+
+		if (!rc) {
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL\n");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				pr_err("Cannot commit the BL %d tag=%d\n",
+					i, (core->bl_tag - 1));
+				rc = -1;
+				break;
+			}
+			CDM_CDBG("BL commit success BL %d tag=%d\n", i,
+				(core->bl_tag - 1));
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count -
+				1))) {
+				rc = cam_hw_cdm_submit_gen_irq(
+					cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+			if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
+				core->bl_tag))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+
+}
+
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("inline IRQ data=%x\n",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+					payload->irq_data,
+					&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+						CAM_HW_CDM_BL_CB_INTERNAL) {
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			CDM_CDBG("CDM HW BL done IRQ\n");
+			complete(&core->bl_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			pr_err("Invalid command IRQ, Need HW reset\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			pr_err("AHB IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			pr_err("Overflow IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		kfree(payload);
+	} else {
+		pr_err("NULL payload\n");
+	}
+
+}
+
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_cdm *core = NULL;
+
+	if (token) {
+		cdm_hw = (struct cam_hw_info *)token;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		atomic_inc(&core->error);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		pr_err("Page fault iova addr %pK\n", (void *)iova);
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+			(void *)iova);
+		atomic_dec(&core->error);
+	} else {
+		pr_err("Invalid token\n");
+	}
+
+}
+
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CDM_CDBG("Got irq\n");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			pr_err("Failed to read CDM HW IRQ status\n");
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				pr_err("Failed to read CDM HW IRQ data\n");
+			}
+		}
+		CDM_CDBG("Got payload=%d\n", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			pr_err("Failed to Write CDM HW IRQ Clear\n");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			pr_err("Failed to Write CDM HW IRQ cmd\n");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			pr_err("Failed to queue work for irq=%x\n",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = 0;
+	genirq_alloc_cmd.region = CAM_MEM_MGR_REGION_NON_SECURE_IO;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		pr_err("Failed to put genirq cmd space for hw\n");
+
+	return rc;
+}
+
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("Enable platform failed\n");
+		goto end;
+	}
+
+	CDM_CDBG("Enable soc done\n");
+
+	/* Before triggering the reset to HW, clear the reset complete */
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		pr_err("Failed to Write CDM HW IRQ mask\n");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		pr_err("Failed to Write CDM HW reset\n");
+		goto disable_return;
+	}
+
+	CDM_CDBG("Waiting for CDM HW resetdone\n");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+			msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+	if (time_left <= 0) {
+		pr_err("CDM HW reset wait timed out\n");
+		goto disable_return;
+	} else {
+		CDM_CDBG("CDM Init success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+		cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
+		rc = 0;
+		goto end;
+	}
+
+disable_return:
+	rc = -1;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("disable platform failed\n");
+	} else {
+		CDM_CDBG("CDM Deinit success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	atomic_set(&cdm_core->error, 0);
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		pr_err("Failed to get CDM HW id for %s\n", cdm_core->name);
+		rc = -EINVAL;
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		pr_err("cpas-cdm get iommu handle failed\n");
+		goto unlock_release_mem;
+	}
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		pr_err("Attach iommu non secure handle failed\n");
+		goto destroy_non_secure_hdl;
+	}
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+			cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		pr_err("Failed to request platform resource\n");
+		goto destroy_non_secure_hdl;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("CPAS registration failed\n");
+		goto release_platform_resource;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
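+	/*
+	 * Bring the HW up once to read its version and pick the matching
+	 * util ops, then power it back down; it is initialized again on the
+	 * first client stream-on.
+	 */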
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Init CDM HW\n");
+		goto init_failed;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		pr_err("Failed to read CDM HW Version\n");
+		rc = -EINVAL;
+		goto deinit;
+	}
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_family_version)) {
+		pr_err("Failed to read CDM family Version\n");
+		rc = -EINVAL;
+		goto deinit;
+	}
+
+	CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		pr_err("Failed to get util ops for hw\n");
+		rc = -EINVAL;
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		pr_err("Failed to set cam hw version for hw\n");
+		rc = -EINVAL;
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Deinit CDM HW\n");
+		goto release_platform_resource;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("HW CDM Interface registration failed\n");
+		goto release_platform_resource;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		pr_err("Deinit failed for hw\n");
+	cdm_hw->open_count--;
+init_failed:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		pr_err("CPAS unregister failed\n");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu non secure hdl failed\n");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get hw private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get hw core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Deinit failed for hw\n");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu non secure hdl failed\n");
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+	},
+};
+
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
new file mode 100644
index 0000000..b1b2117
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -0,0 +1,569 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+	{ .compatible = "qcom,cam-cdm-intf", },
+	{}
+};
+
+static int get_cdm_mgr_refcount(void)
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+		rc = -1;
+	} else {
+		CDM_CDBG("CDM intf mgr get refcount=%d\n",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static void put_cdm_mgr_refcount(void)
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+	} else {
+		CDM_CDBG("CDM intf mgr put refcount=%d\n",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			pr_err("Refcount put when zero\n");
+			WARN_ON(1);
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index)
+{
+	int rc = -1, i, j;
+	char client_name[128];
+
+	CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+		identifier, cell_index);
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CDM_CDBG("dt_num_supported_clients=%d\n",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients);
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CDM_CDBG("client name:%s\n",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i;
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles)
+{
+	int i, j, rc = -1;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+	CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		if (!cdm_mgr.nodes[i].data) {
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			 cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->identifier) || (!data->base_array) ||
+		(!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY are supported\n");
+		rc = -1;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	if ((rc < 0) || (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		pr_err("Failed to identify associated hw id\n");
+		goto end;
+	} else {
+		CDM_CDBG("hw_index:%d\n", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+					sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				pr_err("CDM hw acquire failed\n");
+				goto end;
+			}
+		} else {
+			pr_err("idx %d doesn't have acquire ops\n", hw_index);
+			rc = -1;
+		}
+	}
+end:
+	if (rc < 0) {
+		pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+int cam_cdm_release(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				pr_err("hw release failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have release ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
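+	/* A successful release also drops the reference that was left held
+	 * by cam_cdm_acquire() for this client.
+	 */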
+	if (rc == 0)
+		put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				pr_err("hw submit bl failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have submit ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+int cam_cdm_stream_on(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw start failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have start ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+int cam_cdm_stream_off(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw stop failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have stop ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+int cam_cdm_reset_hw(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				pr_err("CDM hw reset failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have reset ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
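+	/*
+	 * The virtual CDM always occupies the reserved CAM_SW_CDM_INDEX slot;
+	 * HW CDMs are appended afterwards and can register only once the
+	 * virtual CDM is present (cdm_count > 0).
+	 */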
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		pr_err("CDM registration failed type=%d count=%d\n",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index)
+{
+	int rc = -1;
+
+	if ((!hw) || (!data))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		pr_err("CDM Deregistration failed type=%d index=%d\n",
+			type, index);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+static int cam_cdm_intf_probe(struct platform_device *pdev)
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = true;
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) {
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				pr_err("Valid node present in index=%d\n", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	return rc;
+}
+
+static int cam_cdm_intf_remove(struct platform_device *pdev)
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		pr_err("Virtual CDM remove failed\n");
+		put_cdm_mgr_refcount();
+		return rc;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		pr_err("cdm manager refcount not zero %d\n",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			pr_err("Valid node present in index=%d\n", i);
+			goto end;
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static struct platform_driver cam_cdm_intf_driver = {
+	.probe = cam_cdm_intf_probe,
+	.remove = cam_cdm_intf_remove,
+	.driver = {
+	.name = "msm_cam_cdm_intf",
+	.owner = THIS_MODULE,
+	.of_match_table = msm_cam_cdm_intf_dt_match,
+	},
+};
+
+static int __init cam_cdm_intf_init_module(void)
+{
+	return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+static void __exit cam_cdm_intf_exit_module(void)
+{
+	platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
new file mode 100644
index 0000000..66c75f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum for possible CAM CDM hardware blocks */
+enum cam_cdm_id {
+	CAM_CDM_VIRTUAL,
+	CAM_CDM_HW_ANY,
+	CAM_CDM_CPAS_0,
+	CAM_CDM_IPE0,
+	CAM_CDM_IPE1,
+	CAM_CDM_BPS,
+	CAM_CDM_VFE,
+	CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback statuses */
+enum cam_cdm_cb_status {
+	CAM_CDM_CB_STATUS_BL_SUCCESS,
+	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+	CAM_CDM_CB_STATUS_PAGEFAULT,
+	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+	CAM_CDM_CB_STATUS_HW_RESET_DONE,
+	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
+enum cam_cdm_bl_cmd_addr_type {
+	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ *                    like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ *                     of the device. This can be used to form a unique string
+ *                     with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ *             of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ *                     callbacks from CDM driver
+ *                     @handle : CDM Client handle
+ *                     @userdata : Private data given at the time of acquire
+ *                     @status : Callback status
+ *                     @cookie : Cookie passed back when the callback
+ *                               reports a gen irq status
+ * @base_array_cnt : Input number of ioremapped address pairs in base_array,
+ *                   needed only if the selected CDM is virtual.
+ * @base_array : Input pointer to the ioremapped address pair array,
+ *               needed only if the selected CDM is virtual.
+ * @cdm_version : CDM version; output when acquiring a HW CDM and input when
+ *                acquiring a virtual CDM. Currently the acquire API below
+ *                fixes it to a single version.
+ * @ops : Output pointer updated by the CDM driver to the CDM util ops
+ *        for the HW version of the CDM acquired.
+ * @handle  : Output Unique handle generated for this acquire
+ *
+ */
+struct cam_cdm_acquire_data {
+	char identifier[128];
+	uint32_t cell_index;
+	enum cam_cdm_id id;
+	void *userdata;
+	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+		enum cam_cdm_cb_status status, uint32_t cookie);
+	uint32_t base_array_cnt;
+	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+	struct cam_hw_version cdm_version;
+	struct cam_cdm_utils_ops *ops;
+	uint32_t handle;
+};
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ *           by mem_handle
+ * @len : Input length of the BL command; cannot be more than 1MB and
+ *           will be validated against offset + size of the memory pointed
+ *           to by mem_handle
+ *
+ */
+struct cam_cdm_bl_cmd {
+	union {
+		int32_t mem_handle;
+		uint32_t *hw_iova;
+		void *kernel_iova;
+	} bl_addr;
+	uint32_t  offset;
+	uint32_t  len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 for callback needed and 0 for no callback when this BL
+ *            request is done
+ * @userdata : Input private data which will be returned as part of the
+ *             callback if one was requested for this BL request via @flag.
+ * @cookie : Cookie passed back when the callback reports a gen irq status
+ * @type : Type of the submitted BL cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @cmd              : Input payload holding the array of BL commands
+ *                     to be submitted.
+ *
+ */
+struct cam_cdm_bl_request {
+	int flag;
+	void *userdata;
+	uint32_t cookie;
+	enum cam_cdm_bl_cmd_addr_type type;
+	uint32_t cmd_arrary_count;
+	struct cam_cdm_bl_cmd cmd[1];
+};
+
+/**
+ * @brief : API to get the CDM capabilities for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ *                   like vfe, ife, jpeg etc. The cell index is not needed,
+ *                   assuming all devices of a single type map to one SMMU
+ *                   client.
+ * @cdm_handles : Input pointer to iommu handle memory to be updated
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BL's need to be submitted.
+ * @data   : Input pointer to the BL's to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
+
+/**
+ * @brief : API to stream ON a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle of the CDM to be streamed ON
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle of the CDM to be streamed OFF
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset a previously acquired CDM,
+ *          this can be performed only when the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
+
+#endif /* _CAM_CDM_API_H_ */
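For orientation, here is a minimal sketch of how a client might drive the API declared above end to end. The names my_cdm_cb() and my_client_use_cdm(), the "vfe" identifier and the mem_handle argument are illustrative placeholders, not part of this patch.

/* Hypothetical CDM client: acquire, stream on, submit one BL, tear down. */
static void my_cdm_cb(uint32_t handle, void *userdata,
	enum cam_cdm_cb_status status, uint32_t cookie)
{
	/* BL completion, page fault or reset notifications arrive here */
}

static int my_client_use_cdm(int32_t mem_handle, uint32_t offset,
	uint32_t len)
{
	struct cam_cdm_acquire_data acquire = {0};
	struct cam_cdm_bl_request req = {0};
	int rc;

	strlcpy(acquire.identifier, "vfe", sizeof(acquire.identifier));
	acquire.cell_index = 0;
	acquire.id = CAM_CDM_HW_ANY;
	acquire.cam_cdm_callback = my_cdm_cb;

	rc = cam_cdm_acquire(&acquire);
	if (rc)
		return rc;

	rc = cam_cdm_stream_on(acquire.handle);
	if (rc)
		goto release;

	req.flag = 1;	/* request a completion callback */
	req.type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
	req.cmd_arrary_count = 1;
	req.cmd[0].bl_addr.mem_handle = mem_handle;
	req.cmd[0].offset = offset;
	req.cmd[0].len = len;

	rc = cam_cdm_submit_bls(acquire.handle, &req);

	cam_cdm_stream_off(acquire.handle);
release:
	cam_cdm_release(acquire.handle);
	return rc;
}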
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
new file mode 100644
index 0000000..0f5458c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+	CDM_CDBG("E: b=%pK reg=%x off=%x\n", (void *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+		if (reg_addr > (base + mem_len)) {
+			pr_err("accessing invalid mapped region %d\n", reg);
+			goto permission_error;
+		}
+		*value = cam_io_r_mb(reg_addr);
+		CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
+			(void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)),	*value);
+		return false;
+	}
+permission_error:
+	*value = 0;
+	return true;
+
+}
+
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg));
+		if (reg_addr > (base + mem_len)) {
+			pr_err("Accessing invalid region %d:%d\n",
+				reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+			goto permission_error;
+		}
+		cam_io_w_mb(value, reg_addr);
+		return false;
+	}
+permission_error:
+	return true;
+
+}
+
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr)
+{
+	int i, rc = -1;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+						pdev->dev.of_node,
+						"cdm-client-names");
+	CDM_CDBG("Num supported cdm_client = %d\n",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		pr_err("Invalid count of client names count=%d\n",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (ptr->dt_num_supported_clients < 0) {
+		CDM_CDBG("No cdm client names found\n");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+	} else {
+		ptr->dt_cdm_shared = true;
+	}
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CDM_CDBG("cdm-client-names[%d] = %s\n",	i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			pr_err("Reading cdm-client-names failed\n");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table)
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	if (!cdm_hw  || (cdm_hw->soc_info.soc_private)
+		|| !(cdm_hw->soc_info.pdev))
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		pr_err("Failed to retrieve the CDM dt properties\n");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+				sizeof(struct cam_cdm_private_dt_data),
+				GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			pr_err("Failed to load CDM dt private data\n");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) {
+			pr_err("Failed to retrieve the CDM id table\n");
+			goto error;
+		}
+		CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -1;
+	kfree(soc_ptr->soc_private);
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
new file mode 100644
index 0000000..765aba4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev,
+	struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
new file mode 100644
index 0000000..034c782
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -0,0 +1,571 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
+static unsigned int CDMCmdHeaderSizes[
+	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
+	0, /* UNUSED*/
+	3, /* DMI*/
+	0, /* UNUSED*/
+	2, /* RegContinuous*/
+	1, /* RegRandom*/
+	2, /* BUFFER_INDIRECT*/
+	2, /* GenerateIRQ*/
+	3, /* WaitForEvent*/
+	1, /* ChangeBase*/
+	1, /* PERF_CTRL*/
+	3, /* DMI32*/
+	3, /* DMI64*/
+};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+	unsigned int count    : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+	unsigned int count     : 16;
+	unsigned int reserved0 : 8;
+	unsigned int cmd       : 8;
+	unsigned int offset    : 24;
+	unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr:  Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+	unsigned int length     : 16;
+	unsigned int reserved   : 8;
+	unsigned int cmd        : 8;
+	unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+	unsigned int base   : 24;
+	unsigned int cmd    : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd:Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+	unsigned int mask             : 8;
+	unsigned int id               : 8;
+	unsigned int iw_reserved      : 7;
+	unsigned int iw               : 1;
+	unsigned int cmd              : 8;
+	unsigned int offset           : 24;
+	unsigned int offset_reserved  : 8;
+	unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM gen IRQ command.
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+	unsigned int reserved   : 24;
+	unsigned int cmd        : 8;
+	unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd_t - Definition for CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+	unsigned int perf     : 2;
+	unsigned int reserved : 22;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+	return CDMCmdHeaderSizes[command];
+}
+
+uint32_t cdm_required_size_reg_continuous(uint32_t  numVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
+}
+
+uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
+		(2 * numRegVals);
+}
+
+uint32_t cdm_required_size_dmi(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+uint32_t cdm_required_size_genirq(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
+}
+
+uint32_t cdm_required_size_indirect(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+uint32_t cdm_required_size_changebase(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+uint32_t cdm_offsetof_dmi_addr(void)
+{
+	return offsetof(struct cdm_dmi_cmd, addr);
+}
+
+uint32_t cdm_offsetof_indirect_addr(void)
+{
+	return offsetof(struct cdm_indirect_cmd, addr);
+}
+
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+	uint32_t numVals, uint32_t *pVals)
+{
+	uint32_t i;
+	struct cdm_regcontinuous_cmd *pHeader =
+		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
+
+	pHeader->count = numVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_CONT;
+	pHeader->reserved0 = 0;
+	pHeader->reserved1 = 0;
+	pHeader->offset = reg;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+
+	for (i = 0; i < numVals; i++)
+		(((uint32_t *)pCmdBuffer)[i]) = (((uint32_t *)pVals)[i]);
+
+	pCmdBuffer += numVals;
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+	uint32_t *pRegVals)
+{
+	uint32_t i;
+	uint32_t *dst, *src;
+	struct cdm_regrandom_cmd *pHeader =
+		(struct cdm_regrandom_cmd *)pCmdBuffer;
+
+	pHeader->count = numRegVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+	pHeader->reserved = 0;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	dst = pCmdBuffer;
+	src = pRegVals;
+	for (i = 0; i < numRegVals; i++) {
+		*dst++ = *src++;
+		*dst++ = *src++;
+	}
+
+	return dst;
+}
+
+uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+	uint32_t length)
+{
+	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;
+
+	pHeader->cmd        = dmiCmd;
+	pHeader->addr = dmiBufferAddr;
+	pHeader->length = length - 1;
+	pHeader->DMIAddr = DMIAddr;
+	pHeader->DMISel = DMISel;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
+	uint32_t length)
+{
+	struct cdm_indirect_cmd *pHeader =
+		(struct cdm_indirect_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
+	pHeader->addr = indirectBufAddr;
+	pHeader->length = length - 1;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+	struct cdm_changebase_cmd *pHeader =
+		(struct cdm_changebase_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
+	pHeader->base = base;
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+
+	return pCmdBuffer;
+}
+
+void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
+{
+	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
+	pHeader->userdata = userdata;
+}
+
+struct cam_cdm_utils_ops CDM170_ops = {
+	cdm_get_cmd_header_size,
+	cdm_required_size_reg_continuous,
+	cdm_required_size_reg_random,
+	cdm_required_size_dmi,
+	cdm_required_size_genirq,
+	cdm_required_size_indirect,
+	cdm_required_size_changebase,
+	cdm_offsetof_dmi_addr,
+	cdm_offsetof_indirect_addr,
+	cdm_write_regcontinuous,
+	cdm_write_regrandom,
+	cdm_write_dmi,
+	cdm_write_indirect,
+	cdm_write_changebase,
+	cdm_write_genirq,
+};
+
+void cam_cdm_data_alignement_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI)));
+	BUILD_BUG_ON(sizeof(struct cdm_regcontinuous_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)));
+	BUILD_BUG_ON(sizeof(struct cdm_regrandom_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+	BUILD_BUG_ON(sizeof(struct cdm_indirect_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT)));
+	BUILD_BUG_ON(sizeof(struct cdm_genirq_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ)));
+	BUILD_BUG_ON(sizeof(struct cdm_wait_event_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT)));
+	BUILD_BUG_ON(sizeof(struct cdm_changebase_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE)));
+	BUILD_BUG_ON(sizeof(struct  cdm_perf_ctrl_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_PERF_CTRL)));
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_32)));
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_64)));
+}
+
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+	uint32_t base_array_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	void __iomem **device_base)
+{
+	int ret = -1, i;
+
+	for (i = 0; i < base_array_size; i++) {
+		if (base_table[i])
+			CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+			i, (base_table[i])->mem_cam_base, hw_base);
+		if ((base_table[i]) &&
+			((base_table[i])->mem_cam_base == hw_base)) {
+			*device_base = (base_table[i])->mem_base;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	int ret = 0;
+	uint32_t *data;
+	struct cdm_regcontinuous_cmd *reg_cont;
+
+	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
+		(!base_addr)) {
+		pr_err(" invalid base addr and data length  %d %pK\n",
+			cmd_buf_size, base_addr);
+		return -EINVAL;
+	}
+
+	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
+	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
+		(((reg_cont->count * sizeof(uint32_t)) +
+			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
+			cmd_buf_size)) {
+		pr_err(" buffer size %d is not sufficient for count%d\n",
+			cmd_buf_size, reg_cont->count);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	cam_io_memcpy(base_addr + reg_cont->offset,	data,
+		reg_cont->count * sizeof(uint32_t));
+
+	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_regrandom_cmd *reg_random;
+	uint32_t *data;
+
+	if (!base_addr) {
+		pr_err("invalid base address\n");
+		return -EINVAL;
+	}
+
+	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
+	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
+		(((reg_random->count * (sizeof(uint32_t) * 2)) +
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
+			cmd_buf_size)) {
+		pr_err("invalid reg_count  %d cmd_buf_size %d\n",
+			reg_random->count, cmd_buf_size);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+
+	for (i = 0; i < reg_random->count; i++) {
+		CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
+			((uint64_t) base_addr + data[0]), data[1]);
+		cam_io_w(data[1], base_addr + data[0]);
+		data += 2;
+	}
+
+	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+
+	return 0;
+}
+
+static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
+	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_dmi_cmd *swd_dmi;
+	uint32_t *data;
+
+	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
+
+	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
+		pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_required_size_dmi();
+
+	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
+		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			cam_io_w_mb(data[1], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
+			data += 2;
+		}
+	} else {
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			data += 1;
+		}
+	}
+	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
+
+	return 0;
+}
+
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag)
+{
+	int ret = 0;
+	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
+	uint32_t used_bytes = 0;
+
+	total_cmd_buf_size = cmd_buf_size;
+
+	while (cmd_buf_size > 0) {
+		CDM_CDBG("cmd data=%x\n", *cmd_buf);
+		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
+		switch (cdm_cmd_type) {
+		case CAM_CDM_CMD_REG_CONT: {
+			ret = cam_cdm_util_reg_cont_write(*current_device_base,
+				cmd_buf, cmd_buf_size, &used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes/4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_REG_RANDOM: {
+			ret = cam_cdm_util_reg_random_write(
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_SWD_DMI_32:
+		case CAM_CDM_CMD_SWD_DMI_64: {
+			if (*current_device_base == 0) {
+				pr_err("Got SWI DMI cmd =%d for invalid hw\n",
+					cdm_cmd_type);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE: {
+			struct cdm_changebase_cmd *change_base_cmd =
+				(struct cdm_changebase_cmd *)cmd_buf;
+
+			ret = cam_cdm_get_ioremap_from_base(
+				change_base_cmd->base, base_array_size,
+				base_table, current_device_base);
+			if (ret != 0) {
+				pr_err("Get ioremap change base failed %x\n",
+					change_base_cmd->base);
+				break;
+			}
+			CDM_CDBG("Got ioremap for %x addr=%pK\n",
+				change_base_cmd->base,
+				current_device_base);
+			cmd_buf_size -= (4 *
+				cdm_required_size_changebase());
+			cmd_buf += cdm_required_size_changebase();
+			}
+			break;
+		default:
+			pr_err(" unsupported cdm_cmd_type type 0%x\n",
+			cdm_cmd_type);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
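As a quick illustration of the layout these writers produce, consider a two-value reg-continuous command: the command ID always lands in the top byte of the first dword, which is exactly what cam_cdm_util_cmd_buf_write() extracts via CAM_CDM_COMMAND_OFFSET. The helper below is a hypothetical sketch and assumes the same little-endian bitfield layout the parser above already relies on.

static void build_demo_reg_cont(void)
{
	uint32_t buf[8] = {0};
	uint32_t vals[2] = { 0x1, 0x2 };
	uint32_t *next;

	/* 2 header dwords + 2 payload dwords */
	next = cdm_write_regcontinuous(buf, 0x400, 2, vals);

	/* the writer advances by cdm_required_size_reg_continuous(2) dwords */
	WARN_ON(next != buf + cdm_required_size_reg_continuous(2));

	/* command ID sits in bits [31:24] of the first header dword */
	WARN_ON((buf[0] >> CAM_CDM_COMMAND_OFFSET) != CAM_CDM_CMD_REG_CONT);
}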
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
new file mode 100644
index 0000000..09d0d63
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+#define CAM_CDM_SW_CMD_COUNT    2
+#define CAM_CMD_LENGTH_MASK     0xFFFF
+#define CAM_CDM_COMMAND_OFFSET  24
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET   8
+#define CAM_CDM_DMI_DATA_LO_OFFSET   12
+
+enum cam_cdm_command {
+	CAM_CDM_CMD_UNUSED = 0x0,
+	CAM_CDM_CMD_DMI = 0x1,
+	CAM_CDM_CMD_NOT_DEFINED = 0x2,
+	CAM_CDM_CMD_REG_CONT = 0x3,
+	CAM_CDM_CMD_REG_RANDOM = 0x4,
+	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+	CAM_CDM_CMD_GEN_IRQ = 0x6,
+	CAM_CDM_CMD_WAIT_EVENT = 0x7,
+	CAM_CDM_CMD_CHANGE_BASE = 0x8,
+	CAM_CDM_CMD_PERF_CTRL = 0x9,
+	CAM_CDM_CMD_DMI_32 = 0xa,
+	CAM_CDM_CMD_DMI_64 = 0xb,
+	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ *                           in DWORDs.
+ *      @command Command ID
+ *      @return Size of the command in DWORDs
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ *                                    command in dwords.
+ *      @numVals Number of continuous values
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ *                                in dwords.
+ *      @numRegVals  Number of register/value pairs
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ *                              in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ *                                in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
+ *                         command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of address field in the
+ *                              indirect command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_write_regcontinuous: Writes a command into the command buffer.
+ *      @pCmdBuffer:  Pointer to command buffer
+ *      @reg: Beginning of the register address range where
+ *            values will be written.
+ *      @numVals: Number of values (registers) that will be written
+ *      @pVals : An array of values that will be written
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a command into the command buffer in
+ *                       register/value pairs.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @numRegVals: Number of register/value pairs that will be written
+ *      @pRegVals: An array of register/value pairs that will be written
+ *                 The even indices are registers and the odd indices
+ *                 are values, e.g., {reg1, val1, reg2, val2, ...}.
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @dmiCmd: DMI command
+ *      @DMIAddr: Address of the DMI
+ *      @DMISel: Selected bank that the DMI will write to
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes an indirect command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @indirectBufferAddr: Device address of the indirect cmd buffer.
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a changing CDM (address) base command into
+ *                        the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @base: New base (device) address
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq:  Writes a gen irq command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @userdata: userdata or cookie returned by hardware during IRQ.
+ */
+struct cam_cdm_utils_ops {
+uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+uint32_t (*cdm_required_size_reg_continuous)(uint32_t  numVals);
+uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+uint32_t (*cdm_required_size_dmi)(void);
+uint32_t (*cdm_required_size_genirq)(void);
+uint32_t (*cdm_required_size_indirect)(void);
+uint32_t (*cdm_required_size_changebase)(void);
+uint32_t (*cdm_offsetof_dmi_addr)(void);
+uint32_t (*cdm_offsetof_indirect_addr)(void);
+uint32_t* (*cdm_write_regcontinuous)(
+	uint32_t *pCmdBuffer,
+	uint32_t reg,
+	uint32_t numVals,
+	uint32_t *pVals);
+uint32_t *(*cdm_write_regrandom)(
+	uint32_t *pCmdBuffer,
+	uint32_t numRegVals,
+	uint32_t *pRegVals);
+uint32_t *(*cdm_write_dmi)(
+	uint32_t *pCmdBuffer,
+	uint8_t  dmiCmd,
+	uint32_t DMIAddr,
+	uint8_t  DMISel,
+	uint32_t dmiBufferAddr,
+	uint32_t length);
+uint32_t *(*cdm_write_indirect)(
+	uint32_t *pCmdBuffer,
+	uint32_t indirectBufferAddr,
+	uint32_t length);
+uint32_t *(*cdm_write_changebase)(
+	uint32_t *pCmdBuffer,
+	uint32_t base);
+void (*cdm_write_genirq)(
+	uint32_t *pCmdBuffer,
+	uint32_t  userdata);
+};
+
+#endif /* _CAM_CDM_UTIL_H_ */
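For context, a minimal sketch of how a client that received this ops table from cam_cdm_acquire() might size and fill a command buffer; demo_fill_cmd_buf(), hw_base and the register offsets below are hypothetical.

static int demo_fill_cmd_buf(struct cam_cdm_utils_ops *ops,
	uint32_t *buf, uint32_t buf_size_dwords, uint32_t hw_base)
{
	/* even indices are register offsets, odd indices are values */
	uint32_t reg_vals[4] = { 0x10, 0xcafe, 0x14, 0xbeef };
	uint32_t needed = ops->cdm_required_size_changebase() +
		ops->cdm_required_size_reg_random(2);

	if (buf_size_dwords < needed)
		return -EINVAL;

	buf = ops->cdm_write_changebase(buf, hw_base);
	buf = ops->cdm_write_regrandom(buf, 2, reg_vals);

	return 0;
}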
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
new file mode 100644
index 0000000..ed07218
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
new file mode 100644
index 0000000..e34bfc2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
+static void cam_virtual_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		if (payload->irq_status & 0x2) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+		if (payload->irq_status & 0x1) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		kfree(payload);
+	}
+
+}
+
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_cpu_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+				&len);
+		} else if (req->data->type ==
+			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+			rc = 0;
+			vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+		} else {
+			pr_err("Only mem hdl/Kernel va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		if ((!rc) && (vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+				cdm_cmd->cmd[i].len, len);
+			rc = cam_cdm_util_cmd_buf_write(
+				&client->changebase_addr,
+				((uint32_t *)vaddr_ptr +
+					((cdm_cmd->cmd[i].offset)/4)),
+				cdm_cmd->cmd[i].len, client->data.base_array,
+				client->data.base_array_cnt, core->bl_tag);
+			if (rc) {
+				pr_err("write failed for cnt=%d:%d\n",
+					i, req->data->cmd_arrary_count);
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (!rc) {
+			struct cam_cdm_work_payload *payload;
+
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
+			if ((true == req->data->flag) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				struct cam_cdm_bl_cb_request_entry *node;
+
+				node = kzalloc(sizeof(
+					struct cam_cdm_bl_cb_request_entry),
+					GFP_KERNEL);
+				if (!node) {
+					rc = -ENOMEM;
+					break;
+				}
+				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+				node->client_hdl = req->handle;
+				node->cookie = req->data->cookie;
+				node->bl_tag = core->bl_tag;
+				node->userdata = req->data->userdata;
+				mutex_lock(&cdm_hw->hw_mutex);
+				list_add_tail(&node->entry,
+					&core->bl_request_list);
+				mutex_unlock(&cdm_hw->hw_mutex);
+
+				payload = kzalloc(sizeof(
+					struct cam_cdm_work_payload),
+					GFP_ATOMIC);
+				if (payload) {
+					payload->irq_status = 0x2;
+					payload->irq_data = core->bl_tag;
+					payload->hw = cdm_hw;
+					INIT_WORK((struct work_struct *)
+						&payload->work,
+						cam_virtual_cdm_work);
+					queue_work(core->work_queue,
+						&payload->work);
+				}
+			}
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL nothing for virtual\n");
+			if (!rc && (core->bl_tag == 63))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	return rc;
+}
+
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	int rc;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+	cdm_hw->soc_info.soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+	if (!cdm_hw->soc_info.soc_private) {
+		rc = -ENOMEM;
+		goto soc_load_failed;
+	}
+
+	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+	if (rc != 0) {
+		pr_err("Failed to load CDM dt private data\n");
+		rc = -1;
+		kfree(cdm_hw->soc_info.soc_private);
+		cdm_hw->soc_info.soc_private = NULL;
+		goto soc_load_failed;
+	}
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+					cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = NULL;
+	cdm_hw_intf->hw_ops.deinit = NULL;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	cdm_hw->open_count = 0;
+	cdm_core->iommu_hdl.non_secure = -1;
+	cdm_core->iommu_hdl.secure = -1;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+	mutex_lock(&cdm_hw->hw_mutex);
+	cdm_core->id = CAM_CDM_VIRTUAL;
+	memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+		sizeof(CAM_CDM_VIRTUAL_NAME));
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	cdm_core->ops = NULL;
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+		CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("Virtual CDM CPAS registration failed\n");
+		goto cpas_registration_failed;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface registration failed\n");
+		goto intf_registration_failed;
+	}
+	CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	return 0;
+intf_registration_failed:
+	cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+	kfree(cdm_hw->soc_info.soc_private);
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return rc;
+}
+
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EBUSY;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+			cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface de-registration failed\n");
+		return rc;
+	}
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	rc = 0;
+
+	return rc;
+}
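For the kernel-IOVA path handled above, a minimal sketch of what a caller could hand to cam_cdm_submit_bls(); demo_submit_kernel_buf() and the cookie value are hypothetical. The virtual CDM then walks the buffer with cam_cdm_util_cmd_buf_write() instead of pushing it to a HW BL FIFO.

static int demo_submit_kernel_buf(uint32_t cdm_handle, uint32_t *cmd_buf,
	uint32_t used_bytes)
{
	struct cam_cdm_bl_request req = {0};

	req.flag = 1;			/* notify through the acquire callback */
	req.cookie = 0xdead;		/* echoed back in the gen irq callback */
	req.type = CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
	req.cmd_arrary_count = 1;
	req.cmd[0].bl_addr.kernel_iova = cmd_buf;
	req.cmd[0].offset = 0;
	req.cmd[0].len = used_bytes;

	return cam_cdm_submit_bls(cdm_handle, &req);
}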
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
new file mode 100644
index 0000000..183b657
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always make sure below register offsets are aligned with
+ * enum cam_cdm_regs offsets
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+	.first_offset = 0x0,
+	.last_offset = 0x200,
+	.reg_count = 0x30,
+	.offsets = cam170_cpas_cdm_register_offsets,
+	.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+		sizeof(struct cam_cdm_reg_offset)),
+};
+
+#endif /* _CAM_HW_CDM170_REG_H_ */
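To illustrate why the ordering above must track enum cam_cdm_regs (defined in cam_cdm.h, outside this file), here is a hedged sketch of the lookup the SOC layer performs: the enum value itself indexes offsets[], so entry N of this array has to describe the register named by the enum's Nth member. demo_reg_offset() is a hypothetical helper.

/* Same lookup as CAM_CDM_OFFSET_FROM_REG() in cam_cdm_soc.c */
static inline uint32_t demo_reg_offset(enum cam_cdm_regs reg)
{
	return cam170_cpas_cdm_offset_table.offsets[reg].offset;
}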
diff --git a/drivers/media/platform/msm/camera/cam_cpas/Makefile b/drivers/media/platform/msm/camera/cam_cpas/Makefile
new file mode 100644
index 0000000..63dc58e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/cpas_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/camss_top
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
new file mode 100644
index 0000000..4f246e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -0,0 +1,1415 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	int i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CPAS_CDBG("matched %s : %d\n", matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		pr_err("Invalid params %d %d\n", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		pr_err("Invalid vote level=%d, usecases=%d\n", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+
+	if (!bus_client->valid) {
+		pr_err("bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		pr_err("dynamic update not allowed %d %d %d\n",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		pr_err("curr_vote_level %d cannot be greater than 1\n",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
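+	/*
+	 * Dynamic votes alternate between the two bus usecases: the new ab/ib
+	 * values are programmed into the usecase that is not currently
+	 * applied before requesting it from the bus driver.
+	 */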
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+		bus_client->client_id, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
+
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		pr_err("failed get_pdata\n");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		pr_err("Invalid bus pdata, num_usecases=%d\n",
+			pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		pr_err("failed to register bus client\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
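+	/*
+	 * Dynamic-vote bus clients must describe exactly two usecases so that
+	 * bandwidth updates can alternate between them.
+	 */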
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		pr_err("Invalid number of usecases %d for dynamic vote\n",
+			pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	mutex_init(&bus_client->lock);
+
+	CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+		bus_client->src, bus_client->dst, bus_client->client_id);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+
+}
+
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		cam_cpas_util_unregister_bus_client(&curr_port->mnoc_bus);
+		of_node_put(curr_port->axi_port_mnoc_node);
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_unregister_bus_client(
+				&curr_port->camnoc_bus);
+			of_node_put(curr_port->axi_port_camnoc_node);
+		}
+		of_node_put(curr_port->axi_port_node);
+		list_del(&curr_port->sibling_port);
+		mutex_destroy(&curr_port->lock);
+		kfree(curr_port);
+	}
+
+	of_node_put(soc_private->axi_port_list_node);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *axi_port;
+	int rc;
+	struct device_node *axi_port_list_node;
+	struct device_node *axi_port_node = NULL;
+	struct device_node *axi_port_mnoc_node = NULL;
+	struct device_node *axi_port_camnoc_node = NULL;
+
+	INIT_LIST_HEAD(&cpas_core->axi_ports_list_head);
+
+	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
+		"qcom,axi-port-list");
+	if (!axi_port_list_node) {
+		pr_err("Node qcom,axi-port-list not found.\n");
+		return -EINVAL;
+	}
+
+	soc_private->axi_port_list_node = axi_port_list_node;
+
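+	/*
+	 * Each child of qcom,axi-port-list describes one AXI port with an
+	 * mnoc bus client and, on camnoc based targets, a camnoc bus client.
+	 */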
+	for_each_available_child_of_node(axi_port_list_node, axi_port_node) {
+		axi_port = kzalloc(sizeof(*axi_port), GFP_KERNEL);
+		if (!axi_port) {
+			rc = -ENOMEM;
+			goto error_previous_axi_cleanup;
+		}
+		axi_port->axi_port_node = axi_port_node;
+
+		rc = of_property_read_string_index(axi_port_node,
+			"qcom,axi-port-name", 0,
+			(const char **)&axi_port->axi_port_name);
+		if (rc) {
+			pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+			goto port_name_fail;
+		}
+
+		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
+			"qcom,axi-port-mnoc");
+		if (!axi_port_mnoc_node) {
+			pr_err("Node qcom,axi-port-mnoc not found.\n");
+			rc = -EINVAL;
+			goto mnoc_node_get_fail;
+		}
+		axi_port->axi_port_mnoc_node = axi_port_mnoc_node;
+
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &axi_port->mnoc_bus);
+		if (rc)
+			goto mnoc_register_fail;
+
+		if (soc_private->axi_camnoc_based) {
+			axi_port_camnoc_node = of_find_node_by_name(
+				axi_port_node, "qcom,axi-port-camnoc");
+			if (!axi_port_camnoc_node) {
+				pr_err("Node qcom,axi-port-camnoc not found\n");
+				rc = -EINVAL;
+				goto camnoc_node_get_fail;
+			}
+			axi_port->axi_port_camnoc_node = axi_port_camnoc_node;
+
+			rc = cam_cpas_util_register_bus_client(soc_info,
+				axi_port_camnoc_node, &axi_port->camnoc_bus);
+			if (rc)
+				goto camnoc_register_fail;
+		}
+
+		mutex_init(&axi_port->lock);
+
+		INIT_LIST_HEAD(&axi_port->sibling_port);
+		list_add_tail(&axi_port->sibling_port,
+			&cpas_core->axi_ports_list_head);
+		INIT_LIST_HEAD(&axi_port->clients_list_head);
+	}
+
+	return 0;
+camnoc_register_fail:
+	of_node_put(axi_port->axi_port_camnoc_node);
+camnoc_node_get_fail:
+	cam_cpas_util_unregister_bus_client(&axi_port->mnoc_bus);
+mnoc_register_fail:
+	of_node_put(axi_port->axi_port_mnoc_node);
+mnoc_node_get_fail:
+port_name_fail:
+	of_node_put(axi_port->axi_port_node);
+	kfree(axi_port);
+error_previous_axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, soc_info);
+	return rc;
+}
+
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+	uint64_t camnoc_bw, mnoc_bw;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		mnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		mnoc_bw = 0;
+		camnoc_bw = 0;
+	}
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
+			mnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+
+		if (soc_private->axi_camnoc_based) {
+			rc = cam_cpas_util_vote_bus_client_bw(
+				&curr_port->camnoc_bus, camnoc_bw, 0);
+			if (rc) {
+				pr_err("Failed in camnoc vote, enable=%d, %d\n",
+					enable, rc);
+				cam_cpas_util_vote_bus_client_bw(
+					&curr_port->mnoc_bus, 0, 0);
+				goto remove_ahb_vote;
+			}
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+static int cam_cpas_util_insert_client_to_axi_port(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, int32_t client_indx)
+{
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		if (strnstr(curr_port->axi_port_name,
+			soc_private->client_axi_port_name[client_indx],
+			strlen(curr_port->axi_port_name))) {
+
+			cpas_client->axi_port = curr_port;
+			INIT_LIST_HEAD(&cpas_client->axi_sibling_client);
+
+			mutex_lock(&curr_port->lock);
+			list_add_tail(&cpas_client->axi_sibling_client,
+				&cpas_client->axi_port->clients_list_head);
+			mutex_unlock(&curr_port->lock);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void cam_cpas_util_remove_client_from_axi_port(
+	struct cam_cpas_client *cpas_client)
+{
+	mutex_lock(&cpas_client->axi_port->lock);
+	list_del(&cpas_client->axi_sibling_client);
+	mutex_unlock(&cpas_client->axi_port->lock);
+}
+
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		cam_io_w_mb(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		cam_io_w(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t *value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t reg_value;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!value)
+		return -EINVAL;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		reg_value = cam_io_r(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+	*value = reg_value;
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_axi_vote(
+	struct cam_cpas *cpas_core, struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas_client *curr_client;
+	struct cam_cpas_client *temp_client;
+	struct cam_axi_vote req_axi_vote = *axi_vote;
+	struct cam_cpas_axi_port *axi_port = cpas_client->axi_port;
+	uint64_t camnoc_bw = 0, mnoc_bw = 0;
+	int rc = 0;
+
+	if (!axi_port) {
+		pr_err("axi port does not exist\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure we use the same bw for both compressed and uncompressed
+	 * in case the client has requested only one of them.
+	 */
+	if (req_axi_vote.compressed_bw == 0)
+		req_axi_vote.compressed_bw = req_axi_vote.uncompressed_bw;
+
+	if (req_axi_vote.uncompressed_bw == 0)
+		req_axi_vote.uncompressed_bw = req_axi_vote.compressed_bw;
+
+	if ((cpas_client->axi_vote.compressed_bw ==
+		req_axi_vote.compressed_bw) &&
+		(cpas_client->axi_vote.uncompressed_bw ==
+		req_axi_vote.uncompressed_bw))
+		return 0;
+
+	mutex_lock(&axi_port->lock);
+	cpas_client->axi_vote = req_axi_vote;
+
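+	/*
+	 * Aggregate votes of all clients sharing this AXI port: camnoc bw is
+	 * the sum of uncompressed votes, mnoc bw the sum of compressed votes.
+	 */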
+	list_for_each_entry_safe(curr_client, temp_client,
+		&axi_port->clients_list_head, axi_sibling_client) {
+		camnoc_bw += curr_client->axi_vote.uncompressed_bw;
+		mnoc_bw += curr_client->axi_vote.compressed_bw;
+	}
+
+	if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
+		mnoc_bw = camnoc_bw;
+
+	CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+		axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
+		axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
+		camnoc_bw, mnoc_bw);
+
+	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
+		mnoc_bw, 0);
+	if (rc) {
+		pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+			mnoc_bw, mnoc_bw, rc);
+		goto unlock_axi_port;
+	}
+
+	if (soc_private->axi_camnoc_based) {
+		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
+			camnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+				camnoc_bw, camnoc_bw, rc);
+			goto unlock_axi_port;
+		}
+	}
+
+unlock_axi_port:
+	mutex_unlock(&axi_port->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote, client_handle=%d\n", client_handle);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private,
+		cpas_core->cpas_client[client_indx], axi_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+	enum cam_vote_level required_level;
+	enum cam_vote_level highest_level;
+	int i, rc = 0;
+
+	if (!ahb_bus_client->valid) {
+		pr_err("AHB Bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+		pr_err("Dynamic AHB vote not supported\n");
+		return -EINVAL;
+	}
+
+	required_level = ahb_vote->vote.level;
+
+	if (cpas_client->ahb_level == required_level)
+		return 0;
+
+	mutex_lock(&ahb_bus_client->lock);
+	cpas_client->ahb_level = required_level;
+
+	CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+		required_level, ahb_bus_client->curr_vote_level);
+
+	if (required_level == ahb_bus_client->curr_vote_level)
+		goto unlock_bus_client;
+
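+	/*
+	 * The AHB bus client is shared by all CPAS clients, so the applied
+	 * vote is the highest level requested across registered clients.
+	 */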
+	highest_level = required_level;
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (cpas_core->cpas_client[i] && (highest_level <
+			cpas_core->cpas_client[i]->ahb_level))
+			highest_level = cpas_core->cpas_client[i]->ahb_level;
+	}
+
+	CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+
+	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+		highest_level);
+	if (rc)
+		pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+			highest_level, rc);
+
+unlock_bus_client:
+	mutex_unlock(&ahb_bus_client->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!ahb_vote || (ahb_vote->vote.level == 0)) {
+		pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		ahb_vote->vote.freq,
+		cpas_core->cpas_client[client_indx]->ahb_level);
+
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+		cpas_core->cpas_client[client_indx], ahb_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_start *cmd_hw_start;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+		pr_err("Start args size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+	ahb_vote = cmd_hw_start->ahb_vote;
+	axi_vote = cmd_hw_start->axi_vote;
+
+	if (!ahb_vote || !axi_vote)
+		return -EINVAL;
+
+	if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+			ahb_vote->vote.level, axi_vote->compressed_bw,
+			axi_vote->uncompressed_bw);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client is not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is in start state\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		cpas_client->ahb_level);
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		ahb_vote);
+	if (rc)
+		goto done;
+
+	CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, axi_vote);
+	if (rc)
+		goto done;
+
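+	/* First client to start powers on CPAS clocks and core settings */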
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("enable_resources failed, rc=%d\n", rc);
+			goto done;
+		}
+
+		if (cpas_core->internal_ops.power_on_settings) {
+			rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+			if (rc) {
+				cam_cpas_soc_disable_resources(
+					&cpas_hw->soc_info);
+				pr_err("failed in power_on settings rc=%d\n",
+					rc);
+				goto done;
+			}
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	}
+
+	cpas_client->started = true;
+	cpas_core->streamon_clients++;
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc = 0;
+
+	if (!hw_priv || !stop_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+		pr_err("Stop args size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not started\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+	cpas_client->started = false;
+	cpas_core->streamon_clients--;
+
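+	/* Last client to stop releases CPAS clocks and resources */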
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("disable_resources failed, rc=%d\n", rc);
+			goto done;
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		&ahb_vote);
+	if (rc)
+		goto done;
+
+	axi_vote.uncompressed_bw = 0;
+	axi_vote.compressed_bw = 0;
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, &axi_vote);
+
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	int rc = 0;
+
+	if (!hw_priv || !init_hw_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("INIT HW size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_core->internal_ops.init_hw_version) {
+		rc = cpas_core->internal_ops.init_hw_version(cpas_hw,
+			(struct cam_cpas_hw_caps *)init_hw_args);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+	struct cam_cpas_client *cpas_client;
+	char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+	int32_t client_indx = -1;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+		register_params->identifier, register_params->cell_index);
+
+	if (soc_private->client_id_based)
+		snprintf(client_name, sizeof(client_name), "%s%d",
+			register_params->identifier,
+			register_params->cell_index);
+	else
+		snprintf(client_name, sizeof(client_name), "%s",
+			register_params->identifier);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+
+	rc = cam_cpas_util_get_string_index(soc_private->client_name,
+		soc_private->num_clients, client_name, &client_indx);
+	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
+		CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("Invalid Client register : %s %d, %d\n",
+			register_params->identifier,
+			register_params->cell_index, client_indx);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	cpas_client = kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+	if (!cpas_client) {
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
+		cpas_client, client_indx);
+	if (rc) {
+		pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+			client_indx, rc);
+		kfree(cpas_client);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	register_params->client_handle =
+		CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+	memcpy(&cpas_client->data, register_params,
+		sizeof(struct cam_cpas_register_params));
+	cpas_core->cpas_client[client_indx] = cpas_client;
+	cpas_core->registered_clients++;
+
+	mutex_unlock(&cpas_hw->hw_mutex);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	return 0;
+}
+
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not stopped\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cam_cpas_util_remove_client_from_axi_port(
+		cpas_core->cpas_client[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	kfree(cpas_core->cpas_client[client_indx]);
+	cpas_core->cpas_client[client_indx] = NULL;
+	cpas_core->registered_clients--;
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_cpas_hw_caps *hw_caps;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("HW_CAPS size mismatch %ld %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+	*hw_caps = cpas_core->hw_caps;
+
+	return 0;
+}
+
+
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+		pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
+			cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+		struct cam_cpas_register_params *register_params;
+
+		if (sizeof(struct cam_cpas_register_params) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		register_params = (struct cam_cpas_register_params *)cmd_args;
+		rc = cam_cpas_hw_register_client(hw_priv, register_params);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+		uint32_t *client_handle;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		client_handle = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_WRITE: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_write =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+			reg_write->reg_base, reg_write->offset, reg_write->mb,
+			reg_write->value);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_READ: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_read =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_read(hw_priv,
+			reg_read->client_handle, reg_read->reg_base,
+			reg_read->offset, reg_read->mb, &reg_read->value);
+
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AHB_VOTE: {
+		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+		rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+			cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AXI_VOTE: {
+		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+		rc = cam_cpas_hw_update_axi_vote(hw_priv,
+			cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+		break;
+	}
+	default:
+		pr_err("CPAS HW command not valid =%d\n", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		mutex_init(&cpas_core->client_mutex[i]);
+		cpas_core->cpas_client[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i]) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+			cpas_core->cpas_client[i] = NULL;
+		}
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		pr_err("failed to get arch-compat rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		pr_err("arch-compat %s not supported\n", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc != 0)
+		goto release_mem;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_mem;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		pr_err("failed in client setup, rc=%d\n", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		pr_err("failed in ahb setup, rc=%d\n", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in axi setup, rc=%d\n", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
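+	/*
+	 * Enable resources briefly during probe to read HW version and
+	 * capability registers, then disable them again below.
+	 */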
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			pr_err("failed in get_hw_info, rc=%d\n", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		pr_err("Invalid get_hw_info\n");
+		rc = -EINVAL;
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	pr_err("failed in hw probe\n");
+	return rc;
+}
+
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		pr_err("cpas interface not initialized\n");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		pr_err("cpas hw is in power up state\n");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 0000000..c181302
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CPAS_MAX_CLIENTS 20
+
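+/* Client handles currently map 1:1 to client indices */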
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+#define CAM_CPAS_CLIENT_VALID(indx) \
+	(((indx) >= 0) && ((indx) < CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)        \
+	((CAM_CPAS_CLIENT_VALID(indx)) && \
+	(cpas_core->cpas_client[indx]))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx)          \
+	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
+	(cpas_core->cpas_client[indx]->started))
+
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+	CAM_REG_TYPE_READ,
+	CAM_REG_TYPE_WRITE,
+	CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for get hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for irq handling
+ * @setup_regbase: Function pointer for setting up regbase indices
+ * @power_on_settings: Function pointer for hw core specific power on settings
+ *
+ */
+struct cam_cpas_internal_ops {
+	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	irqreturn_t (*handle_irq)(int irq_num, void *data);
+	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+		int32_t regbase_index[], int32_t num_reg_map);
+	int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+};
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info need to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
+ * @offset: Register offset
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+	bool enable;
+	enum cam_cpas_access_type access_type;
+	bool masked_value;
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @started: Whether client has streamed on
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @axi_sibling_client: Client's siblings sharing the same axi port
+ *
+ */
+struct cam_cpas_client {
+	struct cam_cpas_register_params data;
+	bool started;
+	enum cam_vote_level ahb_level;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_axi_port *axi_port;
+	struct list_head axi_sibling_client;
+};
+
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ *
+ */
+struct cam_cpas_bus_client {
+	int src;
+	int dst;
+	struct msm_bus_scale_pdata *pdata;
+	uint32_t client_id;
+	int num_usecases;
+	int num_paths;
+	unsigned int curr_vote_level;
+	bool dyn_vote;
+	struct mutex lock;
+	bool valid;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @sibling_port: Sibling AXI ports
+ * @clients_list_head: List head pointing to list of clients sharing this port
+ * @lock: Mutex lock for accessing this port
+ * @camnoc_bus: CAMNOC bus client info for this port
+ * @mnoc_bus: MNOC bus client info for this port
+ * @axi_port_name: Name of this AXI port
+ * @axi_port_node: Node representing this AXI Port
+ * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
+ * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
+ *
+ */
+struct cam_cpas_axi_port {
+	struct list_head sibling_port;
+	struct list_head clients_list_head;
+	struct mutex lock;
+	struct cam_cpas_bus_client camnoc_bus;
+	struct cam_cpas_bus_client mnoc_bus;
+	const char *axi_port_name;
+	struct device_node *axi_port_node;
+	struct device_node *axi_port_mnoc_node;
+	struct device_node *axi_port_camnoc_node;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @num_clients: Total number of clients that CPAS supports
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_ports_list_head: Head pointing to list of AXI ports
+ * @internal_ops: CPAS HW internal ops
+ *
+ */
+struct cam_cpas {
+	struct cam_cpas_hw_caps hw_caps;
+	struct cam_cpas_client *cpas_client[CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CPAS_MAX_CLIENTS];
+	uint32_t num_clients;
+	uint32_t registered_clients;
+	uint32_t streamon_clients;
+	int32_t regbase_index[CAM_CPAS_REG_MAX];
+	struct cam_cpas_bus_client ahb_bus_client;
+	struct list_head axi_ports_list_head;
+	struct cam_cpas_internal_ops internal_ops;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
new file mode 100644
index 0000000..d2c3e06
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -0,0 +1,137 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+
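+/*
+ * CPAS debug messages are printed at error level when CONFIG_CAM_CPAS_DBG
+ * is enabled, otherwise at debug level.
+ */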
+#ifdef CONFIG_CAM_CPAS_DBG
+#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#undef pr_fmt
+#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
+
+#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> (shift))
+
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+	CAM_HW_CPASTOP,
+	CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_REG_WRITE,
+	CAM_CPAS_HW_CMD_REG_READ,
+	CAM_CPAS_HW_CMD_AHB_VOTE,
+	CAM_CPAS_HW_CMD_AXI_VOTE,
+	CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+	uint32_t client_handle;
+	enum cam_cpas_reg_base reg_base;
+	uint32_t offset;
+	uint32_t value;
+	bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+	uint32_t client_handle;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB vote to apply on start
+ * @axi_vote: AXI bandwidth vote to apply on start
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+	uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+	uint32_t camera_family;
+	struct cam_hw_version camera_version;
+	struct cam_hw_version cpas_version;
+	uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
new file mode 100644
index 0000000..fdebdc7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -0,0 +1,605 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME    "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe completed
+ *
+ */
+struct cam_cpas_intf {
+	struct platform_device *pdev;
+	struct cam_subdev subdev;
+	struct cam_hw_intf *hw_intf;
+	struct cam_cpas_hw_caps hw_caps;
+	struct mutex intf_lock;
+	uint32_t open_cnt;
+	bool probe_done;
+};
+
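+/* Single global CPAS interface instance, valid once probe has completed */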
+static struct cam_cpas_intf *g_cpas_intf;
+
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version) {
+		pr_err("invalid input %pK %pK\n", camera_family,
+			camera_version);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		pr_err("Invalid arg value\n");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
+
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			pr_err("Failed in stop, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid stop ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			pr_err("Failed in start, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid start ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
+
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
+
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc;
+
+	if (!cmd) {
+		pr_err("Invalid input cmd\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		rc = copy_from_user(&query, (void __user *) cmd->handle,
+			sizeof(query));
+		if (rc) {
+			pr_err("Failed in copy from user, rc=%d\n", rc);
+			/* copy_from_user() returns bytes not copied */
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version);
+		if (rc)
+			break;
+
+		rc = copy_to_user((void __user *) cmd->handle, &query,
+			sizeof(query));
+		if (rc) {
+			pr_err("Failed in copy to user, rc=%d\n", rc);
+			/* copy_to_user() returns bytes not copied */
+			rc = -EFAULT;
+		}
+
+		break;
+	}
+	default:
+		pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
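A minimal user-space sketch of the CAM_QUERY_CAP flow handled above — illustrative only; the device path, the uapi header locations, and any struct cam_control fields beyond op_code and handle are assumptions, not taken from this change.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include <media/cam_cpas.h>	/* struct cam_cpas_query_cap (assumed location) */
	#include <media/cam_defs.h>	/* struct cam_control, VIDIOC_CAM_CONTROL (assumed location) */

	static int query_cpas_caps(const char *subdev_path)
	{
		struct cam_cpas_query_cap query;
		struct cam_control cmd;
		int fd, rc;

		fd = open(subdev_path, O_RDWR);	/* CPAS v4l2 subdev node */
		if (fd < 0)
			return -1;

		memset(&query, 0, sizeof(query));
		memset(&cmd, 0, sizeof(cmd));
		cmd.op_code = CAM_QUERY_CAP;
		/* kernel copies the query to/from this user buffer */
		cmd.handle = (uint64_t)(uintptr_t)&query;

		rc = ioctl(fd, VIDIOC_CAM_CONTROL, &cmd);
		if (!rc)
			printf("camera family %u\n", query.camera_family);

		close(fd);
		return rc;
	}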
+
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+	.ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+	.core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+	.open = cam_cpas_subdev_open,
+	.close = cam_cpas_subdev_close,
+};
+
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		pr_err("cpas dev probe already done\n");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		pr_err("Failed in hw probe, rc=%d\n", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		pr_err("Invalid get_hw_caps ops\n");
+		rc = -EINVAL;
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	pr_err("CPAS probe failed\n");
+	return rc;
+}
+
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+	{.compatible = "qcom,cam-cpas"},
+	{}
+};
+
+static struct platform_driver cam_cpas_driver = {
+	.probe = cam_cpas_dev_probe,
+	.remove = cam_cpas_dev_remove,
+	.driver = {
+		.name = CAM_CPAS_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cpas_dt_match,
+	},
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+	return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
new file mode 100644
index 0000000..0a8e6bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
+	struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_private || !pdev) {
+		pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&soc_private->arch_compat);
+	if (rc) {
+		pr_err("device %s failed to read arch-compat\n", pdev->name);
+		return rc;
+	}
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		pr_err("no client-names found\n");
+		count = 0;
+		return -EINVAL;
+	}
+	soc_private->num_clients = count;
+	CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			pr_err("no client-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+	}
+
+	count = of_property_count_strings(of_node, "client-axi-port-names");
+	if ((count <= 0) || (count != soc_private->num_clients)) {
+		pr_err("incorrect client-axi-port-names info %d %d\n",
+			count, soc_private->num_clients);
+		count = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-axi-port-names", i,
+			&soc_private->client_axi_port_name[i]);
+		if (rc) {
+			pr_err("no client-axi-port-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+			soc_private->client_axi_port_name[i]);
+	}
+
+	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
+		"client-bus-camnoc-based");
+
+	return 0;
+}
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		pr_err("failed in get_dt_properties, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		pr_err("Invalid IRQ handler\n");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		irq_data);
+	if (rc) {
+		pr_err("failed in request_platform_resource, rc=%d\n", rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
+	if (rc) {
+		pr_err("failed in get_custom_dt_info, rc=%d\n", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		pr_err("release platform failed, rc=%d\n", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("enable platform resource failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("disable platform failed, rc=%d\n", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
new file mode 100644
index 0000000..fdd9386
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+
+#define CAM_CPAS_MAX_CLIENTS 20
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @axi_camnoc_based: Whether AXI access is camnoc based
+ * @client_axi_port_name: AXI Port name for each client
+ * @axi_port_list_node : Node representing AXI Ports list
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	bool axi_camnoc_based;
+	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
+	struct device_node *axi_port_list_node;
+};
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
new file mode 100644
index 0000000..bce10cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
new file mode 100644
index 0000000..fa8ab89
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	CPAS_CDBG("Family %d, version %d.%d.%d\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
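BITS_MASK_SHIFT is defined in the CPAS hw headers outside this hunk; the version decode above assumes mask-then-shift semantics, illustrated below under that assumption.

	/* assumed semantics: extract a bit field as (reg & mask) >> shift */
	#define EXAMPLE_BITS_MASK_SHIFT(reg, mask, shift) (((reg) & (mask)) >> (shift))

	/*
	 * For a hypothetical hw_version register value of 0x10070000:
	 *   major = EXAMPLE_BITS_MASK_SHIFT(0x10070000, 0xf0000000, 0x1c) = 1
	 *   minor = EXAMPLE_BITS_MASK_SHIFT(0x10070000, 0xfff0000, 0x10)  = 7
	 *   incr  = EXAMPLE_BITS_MASK_SHIFT(0x10070000, 0xffff, 0x0)      = 0
	 */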
+
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on_settings = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
new file mode 100644
index 0000000..820a0df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
new file mode 100644
index 0000000..415de47
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -0,0 +1,301 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+
+struct cam_camnoc_info *camnoc_info;
+
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return 0;
+}
+
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	uint32_t reg_value;
+	int i;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	for (i = 0; i < camnoc_info->error_logger_size; i++) {
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i)
+{
+	uint32_t reg_value;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+{
+	pr_err("ahb timeout error\n");
+
+	return 0;
+}
+
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	uint32_t irq_status;
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+
+	irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			pr_err("Error occurred, type=%d\n", irq_type);
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(cpas_core,
+					soc_info);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_err(cpas_core,
+					soc_info, i);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CPAS_CDBG("TEST IRQ\n");
+				break;
+			default:
+				break;
+			}
+
+			irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+
+	if (irq_status)
+		pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	return IRQ_HANDLED;
+}
+
+static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0)) {
+			camnoc_info = &cam170_cpas100_camnoc_info;
+		} else {
+			pr_err("CPAS Version not supported %d.%d.%d\n",
+				hw_caps->cpas_version.major,
+				hw_caps->cpas_version.minor,
+				hw_caps->cpas_version.incr);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Camera Version not supported %d.%d.%d\n",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on_settings = cam_cpastop_static_settings;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
new file mode 100644
index 0000000..99aae3f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                                 1 QHB port) has an error logger. The error
+ *                                 observed at any slave port is logged into
+ *                                 the error logger register and an IRQ is
+ *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE0 UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE1 or IFE3
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS
+ *                                               UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP
+ *                                               slave  times out after 4000
+ *                                               AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST              : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+	CAM_CAMNOC_HW_IRQ_RESERVED1,
+	CAM_CAMNOC_HW_IRQ_RESERVED2,
+	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ *         settings like QoS, LUT mappings need to be configured for
+ *         each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+	CAM_CAMNOC_CDM,
+	CAM_CAMNOC_IFE02,
+	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IPE_BPS_LRME_READ,
+	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+	CAM_CAMNOC_JPEG,
+	CAM_CAMNOC_FD,
+	CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+	enum cam_camnoc_port_type port_type;
+	bool enable;
+	struct cam_cpas_reg priority_lut_low;
+	struct cam_cpas_reg priority_lut_high;
+	struct cam_cpas_reg urgency;
+	struct cam_cpas_reg danger_lut;
+	struct cam_cpas_reg safe_lut;
+	struct cam_cpas_reg ubwc_ctl;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+	struct cam_cpas_reg sbm_enable;
+	struct cam_cpas_reg sbm_status;
+	struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+	enum cam_camnoc_hw_irq_type irq_type;
+	bool enable;
+	uint32_t sbm_port;
+	struct cam_cpas_reg err_enable;
+	struct cam_cpas_reg err_status;
+	struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to CAMNOC specific settings
+ * @specific_size: Array size of specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @error_logger_size: Array size of IRQ Error logger
+ *
+ */
+struct cam_camnoc_info {
+	struct cam_camnoc_specific *specific;
+	int specific_size;
+	struct cam_camnoc_irq_sbm *irq_sbm;
+	struct cam_camnoc_irq_err *irq_err;
+	int irq_err_size;
+	uint32_t *error_logger;
+	int error_logger_size;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
new file mode 100644
index 0000000..8686bd5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -0,0 +1,532 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x7 : 0x3,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x7,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x7,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = false,
+	}
+};
+
+uint32_t slave_error_logger[] = {
+	0x2700, /* ERRLOGGER_SWID_LOW */
+	0x2704, /* ERRLOGGER_SWID_HIGH */
+	0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.error_logger = &slave_error_logger[0],
+	.error_logger_size = sizeof(slave_error_logger) /
+		sizeof(slave_error_logger[0]),
+};
+
+#endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
new file mode 100644
index 0000000..f6b0729
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -0,0 +1,324 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ *                          are the identifiers used in generic register
+ *                          write/read APIs provided by cpas driver.
+ */
+enum cam_cpas_reg_base {
+	CAM_CPAS_REG_CPASTOP,
+	CAM_CPAS_REG_CAMNOC,
+	CAM_CPAS_REG_CAMSS,
+	CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                              1 QHB port) has an error logger. The error
+ *                              observed at any slave port is logged into
+ *                              the error logger register and an IRQ is
+ *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE1 or IFE3 UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC decoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP slave
+ *                                            times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier        : Input identifier string which is the device label
+ *                      from dt like vfe, ife, jpeg etc
+ * @cell_index        : Input integer identifier pointing to the cell index
+ *                      from dt of the device. This can be used to form a
+ *                      unique string with @identifier like vfe0, ife1,
+ *                      jpeg0, etc
+ * @dev               : device handle
+ * @userdata          : Input private data which will be passed as
+ *                      an argument while callback.
+ * @cam_cpas_client_cb : Input callback pointer for triggering the
+ *                      callbacks from CPAS driver.
+ *                      @client_handle : CPAS client handle
+ *                      @userdata    : User data given at the time of register
+ *                      @event_type  : event type
+ *                      @event_data  : event data
+ * @client_handle       : Output Unique handle generated for this register
+ *
+ */
+struct cam_cpas_register_params {
+	char            identifier[CAM_HW_IDENTIFIER_LENGTH];
+	uint32_t        cell_index;
+	struct device  *dev;
+	void           *userdata;
+	void          (*cam_cpas_client_cb)(
+			int32_t                   client_handle,
+			void                     *userdata,
+			enum cam_camnoc_irq_type  event_type,
+			uint32_t                  event_data);
+	uint32_t        client_handle;
+};
+
+/**
+ * enum cam_vote_level - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC  : Dynamic vote
+ */
+enum cam_vote_type {
+	CAM_VOTE_ABSOLUTE,
+	CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_SVS_VOTE     : SVS vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE   : Turbo vote
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_SVS_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_TURBO_VOTE,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type  : AHB voting type.
+ *          CAM_VOTE_ABSOLUTE : vote based on the value 'level' is set
+ *          CAM_VOTE_DYNAMIC  : vote calculated dynamically using 'freq'
+ *                              and 'dev' handle is set
+ * @level : AHB vote level
+ * @freq  : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+	enum cam_vote_type   type;
+	union {
+		enum cam_vote_level  level;
+		unsigned long        freq;
+	} vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data
+ *                    This is the required bandwidth for uncompressed
+ *                    data traffic between hw core and camnoc.
+ * @compressed_bw   : Bus bandwidth required in Bytes for compressed data.
+ *                    This is the required bandwidth for compressed
+ *                    data traffic between camnoc and mmnoc.
+ *
+ * If only one of the above is applicable to a hw client, the client
+ * must fill the same value in both fields.
+ *
+ */
+struct cam_axi_vote {
+	uint64_t   uncompressed_bw;
+	uint64_t   compressed_bw;
+};
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params);
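A minimal sketch of a hw client filling cam_cpas_register_params and registering with CPAS — the client name, callback body and helper names are hypothetical; the fields and callback signature follow the structure documented above.

	#include <linux/string.h>

	#include "cam_cpas_api.h"

	/* hypothetical client callback invoked by CPAS on CAMNOC events */
	static void my_hw_cpas_cb(int32_t client_handle, void *userdata,
		enum cam_camnoc_irq_type event_type, uint32_t event_data)
	{
		/* handle the reported CAMNOC event for this client */
	}

	static int my_hw_register_cpas(struct device *dev, uint32_t *handle)
	{
		struct cam_cpas_register_params params;
		int rc;

		memset(&params, 0, sizeof(params));
		strlcpy(params.identifier, "ife", sizeof(params.identifier));
		params.cell_index = 0;
		params.dev = dev;
		params.userdata = NULL;
		params.cam_cpas_client_cb = my_hw_cpas_cb;

		rc = cam_cpas_register_client(&params);
		if (rc)
			return rc;

		/* unique handle generated by CPAS, used for all later calls */
		*handle = params.client_handle;
		return 0;
	}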
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients must vote for their minimal
+ *     AHB and AXI bandwidth requirements here. Use cam_cpas_update_ahb_vote
+ *     or cam_cpas_update_axi_vote to scale bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote     : Pointer to ahb vote info
+ * @axi_vote     : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote,
+	struct cam_axi_vote   *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. The AHB and AXI bandwidth votes
+ *     are removed for this client on this call. Clients should not
+ *     use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ *     to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing the client's vote is implicit in cam_cpas_stop.
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote      : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing the client's vote is implicit in cam_cpas_stop.
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote      : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+	uint32_t             client_handle,
+	struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg write with memory barrier
+ * @value         : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                  value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg read with memory barrier
+ * @value         : Value read from the register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                 *value);
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family  : Camera family type. One of
+ *                   CAM_FAMILY_CAMERA_SS
+ *                   CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+	uint32_t                 *camera_family,
+	struct cam_hw_version    *camera_version);
+
+#endif /* _CAM_CPAS_API_H_ */
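
The flow below is an illustrative sketch of how a camera HW client might drive the CPAS API declared above; it is not part of the patch, and the client identifier, vote levels and bandwidth numbers are placeholder assumptions.

Example (illustrative only):
	static int example_cpas_session(struct device *dev)
	{
		struct cam_cpas_register_params params;
		struct cam_ahb_vote ahb_vote;
		struct cam_axi_vote axi_vote;
		uint32_t handle;
		int rc;

		memset(&params, 0, sizeof(params));
		strlcpy(params.identifier, "example", CAM_HW_IDENTIFIER_LENGTH);
		params.cell_index = 0;
		params.dev = dev;
		params.cam_cpas_client_cb = NULL;	/* no CAMNOC IRQ interest */

		rc = cam_cpas_register_client(&params);
		if (rc)
			return rc;
		handle = params.client_handle;

		/* Start with an absolute SVS AHB vote and a placeholder AXI vote */
		ahb_vote.type = CAM_VOTE_ABSOLUTE;
		ahb_vote.vote.level = CAM_SVS_VOTE;
		axi_vote.uncompressed_bw = 100000000;	/* placeholder, in Bytes */
		axi_vote.compressed_bw = 100000000;
		rc = cam_cpas_start(handle, &ahb_vote, &axi_vote);
		if (rc)
			goto unregister;

		/* Scale up while streaming; de-voting is implicit in stop */
		ahb_vote.vote.level = CAM_TURBO_VOTE;
		cam_cpas_update_ahb_vote(handle, &ahb_vote);

		cam_cpas_stop(handle);
	unregister:
		cam_cpas_unregister_client(handle);
		return rc;
	}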
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
new file mode 100644
index 0000000..b6e2d09
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
new file mode 100644
index 0000000..c304eed
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -0,0 +1,1299 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_isp_context.h"
+#include "cam_isp_log.h"
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int __cam_isp_ctx_handle_buf_done_in_activated_state(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_ctx_request  *req;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->active_req_list)) {
+		CDBG("Buf done with no active request!\n");
+		goto end;
+	}
+
+	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+
+	req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
+			if (done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle)
+			break;
+		}
+
+		if (j == req_isp->num_fence_map_out) {
+			pr_err("Can not find matching resource handle 0x%x!\n",
+				done->resource_handle[i]);
+			rc = -EINVAL;
+			continue;
+		}
+
+		if (!bubble_state) {
+			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+		} else if (!req_isp->bubble_report) {
+			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR);
+		} else {
+			/*
+			 * Ignore the buffer done if bubble detect is on
+			 * Ignore the buffer done if bubble detection is on.
+			 * In most cases, the active list should be empty when
+			 * a bubble is detected. But for safety, we just move the
+			 */
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			continue;
+		}
+
+		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+			   req->request_id,
+			   req_isp->fence_map_out[j].sync_id);
+		req_isp->num_acked++;
+		req_isp->fence_map_out[j].sync_id = -1;
+	}
+
+	if (req_isp->num_acked == req_isp->num_fence_map_out) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("Reg upd ack with no pending request\n");
+		goto end;
+	}
+	req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	list_del_init(&req->list);
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp->num_fence_map_out != 0) {
+		CDBG("%s: move request %lld to active list\n", __func__,
+			req->request_id);
+		if (!list_empty(&ctx->active_req_list))
+			pr_err("%s: More than one entry in active list\n",
+				__func__);
+		list_add_tail(&req->list, &ctx->active_req_list);
+	} else {
+		/* no io config, so the request is completed. */
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/*
+	 * This function is only called directly from the applied and bubble
+	 * applied states, so change the substate here.
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_notify_sof_in_actived_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_req_mgr_sof_notify  notify;
+	struct cam_context *ctx = ctx_isp->base;
+
+	/* notify reqmgr with sof  signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify SOF to CRM\n", __func__);
+	}
+
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	CDBG("%s: Enter\n", __func__);
+	ctx_isp->frame_id++;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (ctx->state != CAM_CTX_ACTIVATED) {
+		CDBG("%s: invalid RUP\n", __func__);
+		goto end;
+	}
+
+	/*
+	 * This is for the first update. The initial configuration will
+	 * trigger the reg_upd in the first frame.
+	 */
+	if (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (req_isp->num_fence_map_out == req_isp->num_acked)
+			list_add_tail(&req->list, &ctx->free_req_list);
+		else {
+			/* need to handle the buf done */
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->substate_activated =
+				CAM_ISP_CTX_ACTIVATED_EPOCH;
+		}
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If there is no pending request at epoch, this is an error
+		 * case. The recovery is to go back to the SOF state.
+		 */
+		pr_err("%s: No pending request\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		pr_err("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+end:
+	return rc;
+}
+
+
+static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	ctx_isp->frame_id++;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	pr_err("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+
+static int __cam_isp_ctx_epoch_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	/*
+	 * This means we missed the reg upd ack. So we need to
+	 * transition to BUBBLE state again.
+	 */
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If there is no pending request at epoch, this is an error
+		 * case. Just go back to the bubble state.
+		 */
+		pr_err("%s: No pending request.\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int                              rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_req_mgr_error_notify  notify;
+
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_error_event_data  *error_event_data =
+			(struct cam_isp_hw_error_event_data *)evt_data;
+
+	uint32_t error_type = error_event_data->error_type;
+
+	CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
+		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
+		notify.error = CRM_KMD_ERR_FATAL;
+
+	/*
+	 * Need to check the active requests and move all of them to the
+	 * pending request list.
+	 * Note: this function needs to be revisited!
+	 */
+
+	if (list_empty(&ctx->active_req_list)) {
+		pr_err("handling error with no active request!\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+
+		ctx->ctx_crm_intf->notify_err(&notify);
+		pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify ERROR to CRM\n", __func__);
+		rc = -EFAULT;
+	}
+
+	list_del_init(&req->list);
+	list_add(&req->list, &ctx->pending_req_list);
+	/* might need to check if active list is empty */
+
+end:
+	CDBG("%s: Exit\n", __func__);
+	return rc;
+}
+
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_sof,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_epoch,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* Bubble Applied */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble_applied,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_bubble_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HALT */
+	{
+	},
+};
+
+static int __cam_isp_ctx_apply_req_in_activated_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+	uint32_t next_state)
+{
+	int rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp;
+	struct cam_hw_config_args        cfg;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("%s: No available request for Apply id %lld\n",
+			__func__, apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+
+	/*
+	 * Check whether the request id matches the tip of the pending list;
+	 * if not, we are in the middle of error handling and must reject
+	 * this apply.
+	 */
+	if (req->request_id != apply->request_id) {
+		rc = -EFAULT;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+
+	req_isp->bubble_report = apply->report_if_bubble;
+
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.num_hw_update_entries = req_isp->num_cfg;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		pr_err("%s: Can not apply the configuration\n", __func__);
+	} else {
+		spin_lock(&ctx->lock);
+		ctx_isp->substate_activated = next_state;
+		CDBG("%s: new state %d\n", __func__, next_state);
+		spin_unlock(&ctx->lock);
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_sof(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_epoch(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_bubble(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static struct cam_ctx_ops
+	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_sof,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
+		},
+		.irq_ops = NULL,
+	},
+	/* Bubble Applied */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+
+/* top level state machine */
+static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_release_args       rel_arg;
+	struct cam_ctx_request	        *req;
+	struct cam_isp_ctx_req	        *req_isp;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->crm_ctx_intf = NULL;
+	ctx_isp->frame_id = 0;
+
+	/*
+	 * Ideally, we should never have any active request here.
+	 * But we still add a sanity check here to help debugging.
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		pr_err("%s: Active list is not empty.\n", __func__);
+
+	/* flush the pending list */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		pr_err("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_top_state(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request           *req = NULL;
+	struct cam_isp_ctx_req           *req_isp;
+	uint64_t                          packet_addr;
+	struct cam_packet                *packet;
+	size_t                            len = 0;
+	struct cam_hw_prepare_update_args cfg;
+	struct cam_req_mgr_add_request    add_req;
+	struct cam_isp_context           *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: get free request object......\n", __func__);
+
+	/* get free request */
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		pr_err("%s: No more request obj free\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		(uint64_t *) &packet_addr, &len);
+	if (rc != 0) {
+		pr_err("%s: Can not get packet address\n", __func__);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
+	CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
+	CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+		len, cmd->offset);
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+	CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
+	CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.out_map_entries = req_isp->fence_map_out;
+	cfg.in_map_entries = req_isp->fence_map_in;
+
+	CDBG("%s: try to prepare config packet......\n", __func__);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		pr_err("%s: Prepare config packet failed in HW layer\n",
+			__func__);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req_isp->num_cfg = cfg.num_hw_update_entries;
+	req_isp->num_fence_map_out = cfg.num_out_map_entries;
+	req_isp->num_fence_map_in = cfg.num_in_map_entries;
+	req_isp->num_acked = 0;
+
+	CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
+		__func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_fence_map_in);
+
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+
+	if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+		add_req.link_hdl = ctx->link_hdl;
+		add_req.dev_hdl  = ctx->dev_hdl;
+		add_req.req_id   = req->request_id;
+		rc = ctx->ctx_crm_intf->add_req(&add_req);
+		if (rc) {
+			pr_err("%s: Error adding request id=%llu\n", __func__,
+				req->request_id);
+			goto free_req;
+		}
+	}
+
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->pending_req_list);
+	spin_unlock(&ctx->lock);
+
+	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+		req->request_id);
+
+	return rc;
+
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock(&ctx->lock);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_acquire_args       param;
+	struct cam_isp_resource         *isp_res = NULL;
+	struct cam_create_dev_hdl        req_hdl_param;
+	struct cam_hw_release_args       release;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready!\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
+		 __func__, cmd->session_handle, cmd->num_resources,
+		cmd->handle_type, cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
+		pr_err("Too many resources in the acquire!\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		pr_err("%s: Only user pointer is supported!", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	isp_res = kzalloc(
+		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
+	if (!isp_res) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CDBG("%s: start copy %d resources from user\n",
+		__func__, cmd->num_resources);
+
+	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
+		sizeof(*isp_res)*cmd->num_resources)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = (uint64_t) isp_res;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		pr_err("Acquire device failed\n");
+		goto free_res;
+	}
+
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+	req_hdl_param.priv = ctx;
+
+	CDBG("%s: get device handle from bridge\n", __func__);
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		pr_err("Can not create device handle\n");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+	kfree(isp_res);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx_isp->hw_ctx = NULL;
+free_res:
+	kfree(isp_res);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+
+	if (!rc && ctx->link_hdl)
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc = 0;
+
+	CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+
+	ctx->link_hdl = link->link_hdl;
+	ctx->ctx_crm_intf = link->crm_cb;
+
+	/* change state only if we had the init config */
+	if (!list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *dev_info)
+{
+	int rc = 0;
+
+	dev_info->dev_hdl = ctx->dev_hdl;
+	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
+	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
+	dev_info->p_delay = 1;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args         arg;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (cmd->session_handle != ctx->session_hdl ||
+		cmd->dev_handle != ctx->dev_hdl) {
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* should never happen */
+		pr_err("%s: Start device with empty configuration\n",
+			__func__);
+		rc = -EFAULT;
+		goto end;
+	} else {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	}
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (!ctx_isp->hw_ctx) {
+		pr_err("%s:%d: Wrong hw context pointer.\n",
+			__func__, __LINE__);
+		rc = -EFAULT;
+		goto end;
+	}
+	arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	arg.hw_update_entries = req_isp->cfg;
+	arg.num_hw_update_entries = req_isp->num_cfg;
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	/*
+	 * This is the only place where the state is changed before calling
+	 * the hw, because the hardware tasklet has higher priority and can
+	 * cause the irq handling to come in early.
+	 */
+	ctx->state = CAM_CTX_ACTIVATED;
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	if (rc) {
+		/* HW failure. The user needs to clean up the resource */
+		pr_err("Start HW failed\n");
+		ctx->state = CAM_CTX_READY;
+		goto end;
+	}
+	CDBG("%s: start device success\n", __func__);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated_unlock(
+	struct cam_context *ctx)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args          stop;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	/* Mask off all the incoming hardware events */
+	spin_lock(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock(&ctx->lock);
+	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+
+	/* stop hw first */
+	if (ctx_isp->hw_ctx) {
+		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in active list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx_isp->frame_id = 0;
+
+	CDBG("%s: next state %d", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	ctx->state = CAM_CTX_ACQUIRED;
+	return rc;
+}
+
+static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+
+	if (ctx_isp->hw_ctx) {
+		struct cam_hw_release_args   arg;
+
+		arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	ctx->state =  CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: Enter: apply req in Substate %d\n",
+		__func__, ctx_isp->substate_activated);
+	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
+		crm_ops.apply_req) {
+		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
+			crm_ops.apply_req(ctx, apply);
+	} else {
+		pr_err("%s: No handler function in activated substate %d\n",
+			__func__, ctx_isp->substate_activated);
+		rc = -EFAULT;
+	}
+
+	if (rc)
+		pr_err("%s: Apply failed in active substate %d\n",
+			__func__, ctx_isp->substate_activated);
+	return rc;
+}
+
+
+
+static int __cam_isp_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	spin_lock(&ctx->lock);
+	CDBG("%s: Enter: State %d Substate %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated);
+	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+		irq_ops[evt_id]) {
+		rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+			irq_ops[evt_id](ctx_isp, evt_data);
+	} else {
+		CDBG("%s: No handler function for substate %d\n", __func__,
+			ctx_isp->substate_activated);
+	}
+	CDBG("%s: Exit: State %d Substate %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated);
+	spin_unlock(&ctx->lock);
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
+		},
+		.crm_ops = {
+			.link = __cam_isp_ctx_link_in_acquired,
+			.unlink = __cam_isp_ctx_unlink_in_acquired,
+			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+		},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.start_dev = __cam_isp_ctx_start_dev_in_ready,
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_ready,
+		},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+			.release_dev = __cam_isp_ctx_release_dev_in_activated,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req,
+		},
+		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+	},
+};
+
+
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_intf)
+{
+	int rc = -1;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		pr_err("%s: Invalid Context\n", __func__);
+		goto err;
+	}
+
+	/* ISP context setup */
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+	ctx->frame_id = 0;
+	ctx->hw_ctx = NULL;
+	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
+	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		ctx->req_base[i].req_priv = &ctx->req_isp[i];
+		ctx->req_isp[i].base = &ctx->req_base[i];
+	}
+
+	/* camera context setup */
+	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		pr_err("%s: Camera Context Base init failed\n", __func__);
+		goto err;
+	}
+
+	/* link camera context with isp context */
+	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+int cam_isp_context_deinit(struct cam_isp_context *ctx)
+{
+	int rc = 0;
+
+	if (ctx->base)
+		cam_context_deinit(ctx->base);
+
+	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
+		pr_err("%s: ISP context substate is invalid\n", __func__);
+
+	memset(ctx, 0, sizeof(*ctx));
+	return rc;
+}
+
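
The activated-state code above resolves each hardware event through a two-level lookup: the top state machine forwards the IRQ into the activated substate table, which selects the handler for the (substate, event) pair. The fragment below is a simplified, self-contained sketch of that dispatch pattern; the example_* names are illustrative and not part of the driver.

Example (illustrative only):
	typedef int (*example_evt_handler)(void *ctx, void *evt_data);

	struct example_substate_irq_ops {
		example_evt_handler irq_ops[CAM_ISP_HW_EVENT_MAX];
	};

	/*
	 * Pick the handler registered for the current substate and event id;
	 * a NULL entry means the event is ignored in that substate.
	 */
	static int example_dispatch(struct example_substate_irq_ops *table,
		uint32_t substate, uint32_t evt_id, void *ctx, void *evt_data)
	{
		example_evt_handler handler = table[substate].irq_ops[evt_id];

		if (!handler)
			return 0;

		return handler(ctx, evt_data);
	}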
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
new file mode 100644
index 0000000..dae1dda
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_CONTEXT_H_
+#define _CAM_ISP_CONTEXT_H_
+
+
+#include <linux/spinlock.h>
+#include <uapi/media/cam_isp.h>
+
+#include "cam_context.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/*
+ * Maximum hw resource - This number is based on the maximum
+ * output port resource. The current maximum resource number
+ * is 20.
+ */
+#define CAM_ISP_CTX_RES_MAX                     20
+
+/*
+ * Maximum configuration entry size - This is based on the
+ * worst case DUAL IFE use case plus some margin.
+ */
+#define CAM_ISP_CTX_CFG_MAX                     20
+
+/* forward declaration */
+struct cam_isp_context;
+
+/* cam isp context irq handling function type */
+typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp,
+	void *evt_data);
+
+/**
+ * enum cam_isp_ctx_activated_substate - sub states for activated
+ *
+ */
+enum cam_isp_ctx_activated_substate {
+	CAM_ISP_CTX_ACTIVATED_SOF,
+	CAM_ISP_CTX_ACTIVATED_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_EPOCH,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_HALT,
+	CAM_ISP_CTX_ACTIVATED_MAX,
+};
+
+
+/**
+ * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops:               Array of handle function pointers.
+ *
+ */
+struct cam_isp_ctx_irq_ops {
+	cam_isp_hw_event_cb_func         irq_ops[CAM_ISP_HW_EVENT_MAX];
+};
+
+/**
+ * struct cam_isp_ctx_req - ISP context request object
+ *
+ * @base:                  Common request object pointer
+ * @cfg:                   ISP hardware configuration array
+ * @num_cfg:               Number of ISP hardware configuration entries
+ * @fence_map_out:         Output fence mapping array
+ * @num_fence_map_out:     Number of output fence map entries
+ * @fence_map_in:          Input fence mapping array
+ * @num_fence_map_in:      Number of input fence map entries
+ * @num_acked:             Count to track acked entries for output.
+ *                         If count equals the number of fence out, it means
+ *                         the request has been completed.
+ * @bubble_report:         Flag to track if bubble report is active on
+ *                         current request
+ *
+ */
+struct cam_isp_ctx_req {
+	struct cam_ctx_request          *base;
+
+	struct cam_hw_update_entry       cfg[CAM_ISP_CTX_CFG_MAX];
+	uint32_t                         num_cfg;
+	struct cam_hw_fence_map_entry    fence_map_out[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_out;
+	struct cam_hw_fence_map_entry    fence_map_in[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_in;
+	uint32_t                         num_acked;
+	int32_t                          bubble_report;
+};
+
+/**
+ * struct cam_isp_context  - ISP context object
+ *
+ * @base:                  Common context object pointer
+ * @frame_id:              Frame id tracking for the isp context
+ * @substate_activated:    Current substate for the activated state.
+ * @substate_machine:      ISP substate machine for external interface
+ * @substate_machine_irq:  ISP substate machine for irq handling
+ * @req_base:              Common request object storage
+ * @req_isp:               ISP private request object storage
+ * @hw_ctx:                HW object returned by the acquire device command
+ *
+ */
+struct cam_isp_context {
+	struct cam_context              *base;
+
+	int64_t                          frame_id;
+	uint32_t                         substate_activated;
+	struct cam_ctx_ops              *substate_machine;
+	struct cam_isp_ctx_irq_ops      *substate_machine_irq;
+
+	struct cam_ctx_request           req_base[CAM_CTX_REQ_MAX];
+	struct cam_isp_ctx_req           req_isp[CAM_CTX_REQ_MAX];
+
+	void                            *hw_ctx;
+};
+
+/**
+ * cam_isp_context_init()
+ *
+ * @brief:              Initialization function for the ISP context
+ *
+ * @ctx:                ISP context obj to be initialized
+ * @ctx_base:           Common camera context object to link with
+ * @bridge_ops:         Bridge callback function table
+ * @hw_intf:            ISP hw manager interface
+ *
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *bridge_ops,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_isp_context_deinit()
+ *
+ * @brief:               Deinitialize function for the ISP context
+ *
+ * @ctx:                 ISP context obj to be deinitialized
+ *
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx);
+
+
+#endif  /* _CAM_ISP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
new file mode 100644
index 0000000..9768912
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_isp_dev.h"
+#include "cam_isp_log.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_node.h"
+
+static struct cam_isp_dev g_isp_dev;
+
+static const struct of_device_id cam_isp_dt_match[] = {
+	{
+		.compatible = "qcom,cam-isp"
+	},
+	{}
+};
+
+static int cam_isp_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+
+	/* clean up resources */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+		if (rc)
+			pr_err("%s: ISP context %d deinit failed\n",
+				__func__, i);
+	}
+
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+	if (rc)
+		pr_err("%s: Unregister failed\n", __func__);
+
+	memset(&g_isp_dev, 0, sizeof(g_isp_dev));
+	return 0;
+}
+
+static int cam_isp_dev_probe(struct platform_device *pdev)
+{
+	int rc = -1;
+	int i;
+	struct cam_hw_mgr_intf         hw_mgr_intf;
+	struct cam_node               *node;
+
+	/* Initialize the v4l2 subdevice first (create cam_node) */
+	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+		CAM_IFE_DEVICE_TYPE);
+	if (rc) {
+		pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+		goto err;
+	}
+	node = (struct cam_node *) g_isp_dev.sd.token;
+
+	/* Initialize the context list */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
+			&g_isp_dev.ctx[i],
+			&node->crm_node_intf,
+			&node->hw_mgr_intf);
+		if (rc) {
+			pr_err("%s: ISP context init failed!\n", __func__);
+			goto unregister;
+		}
+	}
+
+	/* Initialize the cam node */
+	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
+		CAM_ISP_DEV_NAME);
+	if (rc) {
+		pr_err("%s: ISP node init failed!\n", __func__);
+		goto unregister;
+	}
+
+	pr_info("%s: Camera ISP probe complete\n", __func__);
+
+	return 0;
+unregister:
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+err:
+	return rc;
+}
+
+
+static struct platform_driver isp_driver = {
+	.probe = cam_isp_dev_probe,
+	.remove = cam_isp_dev_remove,
+	.driver = {
+		.name = "cam_isp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_isp_dt_match,
+	},
+};
+
+static int __init cam_isp_dev_init_module(void)
+{
+	return platform_driver_register(&isp_driver);
+}
+
+static void __exit cam_isp_dev_exit_module(void)
+{
+	platform_driver_unregister(&isp_driver);
+}
+
+module_init(cam_isp_dev_init_module);
+module_exit(cam_isp_dev_exit_module);
+MODULE_DESCRIPTION("MSM ISP driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
new file mode 100644
index 0000000..95463ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_DEV_H_
+#define _CAM_ISP_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_isp_context.h"
+
+/**
+ * struct cam_isp_dev - Camera ISP V4l2 device node
+ *
+ * @sd:                    Common camera subdevice node
+ * @ctx:                   ISP base context storage
+ * @ctx_isp:               ISP private context storage
+ *
+ */
+struct cam_isp_dev {
+	struct cam_subdev          sd;
+	struct cam_context         ctx[CAM_CTX_MAX];
+	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+};
+
+#endif /* _CAM_ISP_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
new file mode 100644
index 0000000..4f5205e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_LOG_H_
+#define _CAM_ISP_LOG_H_
+
+#include <linux/kernel.h>
+
+#define ISP_TRACE_ENABLE			1
+
+#if (ISP_TRACE_ENABLE == 1)
+	#define ISP_TRACE(args...)		trace_printk(args)
+#else
+	#define ISP_TRACE(args...)
+#endif
+
+#endif /* _CAM_ISP_LOG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
new file mode 100644
index 0000000..9f2204b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_INTF_H_
+#define _CAM_ISP_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_mgr_intf.h"
+
+/**
+ *  enum cam_isp_hw_event_type - Collection of the ISP hardware events
+ */
+enum cam_isp_hw_event_type {
+	CAM_ISP_HW_EVENT_ERROR,
+	CAM_ISP_HW_EVENT_SOF,
+	CAM_ISP_HW_EVENT_REG_UPDATE,
+	CAM_ISP_HW_EVENT_EPOCH,
+	CAM_ISP_HW_EVENT_EOF,
+	CAM_ISP_HW_EVENT_DONE,
+	CAM_ISP_HW_EVENT_MAX
+};
+
+
+/**
+ * enum cam_isp_hw_err_type - Collection of the ISP error types for
+ *                         ISP hardware event CAM_ISP_HW_EVENT_ERROR
+ */
+enum cam_isp_hw_err_type {
+	CAM_ISP_HW_ERROR_NONE,
+	CAM_ISP_HW_ERROR_OVERFLOW,
+	CAM_ISP_HW_ERROR_P2I_ERROR,
+	CAM_ISP_HW_ERROR_VIOLATION,
+	CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
+	CAM_ISP_HW_ERROR_MAX,
+};
+
+
+/**
+ * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
+ *
+ * @timestamp:             Timestamp for the SOF event
+ *
+ */
+struct cam_isp_hw_sof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_reg_update_event_data - Event payload for
+ *                         CAM_HW_EVENT_REG_UPDATE
+ *
+ * @timestamp:             Timestamp for the reg update event
+ *
+ */
+struct cam_isp_hw_reg_update_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
+ *
+ * @timestamp:             Timestamp for the epoch event
+ *
+ */
+struct cam_isp_hw_epoch_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
+ *
+ * @num_handles:           Number of resource handles
+ * @resource_handle:       Resource handle array
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_done_event_data {
+	uint32_t             num_handles;
+	uint32_t             resource_handle[
+				CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
+ *
+ * @timestamp:             Timestamp for the EOF event
+ *
+ */
+struct cam_isp_hw_eof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
+ *
+ * @error_type:            Error type for the error event
+ * @timestamp:             Timestamp for the error event
+ *
+ */
+struct cam_isp_hw_error_event_data {
+	uint32_t             error_type;
+	struct timeval       timestamp;
+};
+
+/**
+ * cam_isp_hw_mgr_init()
+ *
+ * @brief:              Initialization function for the ISP hardware manager
+ *
+ * @of_node:            Device node input
+ * @hw_mgr:             Input/output structure for the ISP hardware manager
+ *                          initialization
+ *
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr);
+
+#endif /* _CAM_ISP_HW_MGR_INTF_H_ */
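
In the cam_isp_dev_probe() shown earlier, hw_mgr_intf is handed to cam_node_init() without being populated; presumably it is meant to be filled by cam_isp_hw_mgr_init() declared above. The fragment below sketches that wiring under that assumption and is not code from this patch.

Example (illustrative only):
	/* Sketch: fill the ISP hw manager interface before cam_node_init() */
	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
	if (rc) {
		pr_err("%s: ISP HW manager init failed\n", __func__);
		goto unregister;
	}

	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
		CAM_ISP_DEV_NAME);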
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
new file mode 100644
index 0000000..4e6a06e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += ife_csid_hw/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
new file mode 100644
index 0000000..1615d21f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid_dev.o cam_ife_csid_soc.o cam_ife_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid170.o cam_ife_csid_lite170.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
new file mode 100644
index 0000000..bdd59d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
@@ -0,0 +1,60 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME                    "csid_170"
+#define CAM_CSID_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+	.csid_reg = &cam_ife_csid_170_reg_offset,
+	.hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid170_dt_match[] = {
+	{
+		.compatible = "qcom,csid170",
+		.data = &cam_ife_csid170_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid170_dt_match);
+
+static struct platform_driver cam_ife_csid170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid170_driver);
+}
+
+static void __exit cam_ife_csid170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid170_driver);
+}
+
+module_init(cam_ife_csid170_init_module);
+module_exit(cam_ife_csid170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
new file mode 100644
index 0000000..8ff2a55
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -0,0 +1,295 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_170_H_
+#define _CAM_IFE_CSID_170_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_ipp_reg_offset  cam_ife_csid_170_ipp_reg_offset = {
+	.csid_ipp_irq_status_addr            = 0x30,
+	.csid_ipp_irq_mask_addr              = 0x34,
+	.csid_ipp_irq_clear_addr             = 0x38,
+	.csid_ipp_irq_set_addr               = 0x3c,
+
+	.csid_ipp_cfg0_addr                  = 0x200,
+	.csid_ipp_cfg1_addr                  = 0x204,
+	.csid_ipp_ctrl_addr                  = 0x208,
+	.csid_ipp_frm_drop_pattern_addr      = 0x20c,
+	.csid_ipp_frm_drop_period_addr       = 0x210,
+	.csid_ipp_irq_subsample_pattern_addr = 0x214,
+	.csid_ipp_irq_subsample_period_addr  = 0x218,
+	.csid_ipp_hcrop_addr                 = 0x21c,
+	.csid_ipp_vcrop_addr                 = 0x220,
+	.csid_ipp_pix_drop_pattern_addr      = 0x224,
+	.csid_ipp_pix_drop_period_addr       = 0x228,
+	.csid_ipp_line_drop_pattern_addr     = 0x22c,
+	.csid_ipp_line_drop_period_addr      = 0x230,
+	.csid_ipp_rst_strobes_addr           = 0x240,
+	.csid_ipp_status_addr                = 0x254,
+	.csid_ipp_misr_val_addr              = 0x258,
+	.csid_ipp_format_measure_cfg0_addr   = 0x270,
+	.csid_ipp_format_measure_cfg1_addr   = 0x274,
+	.csid_ipp_format_measure0_addr       = 0x278,
+	.csid_ipp_format_measure1_addr       = 0x27c,
+	.csid_ipp_format_measure2_addr       = 0x280,
+	.csid_ipp_timestamp_curr0_sof_addr   = 0x290,
+	.csid_ipp_timestamp_curr1_sof_addr   = 0x294,
+	.csid_ipp_timestamp_perv0_sof_addr   = 0x298,
+	.csid_ipp_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_ipp_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_ipp_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_ipp_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_ipp_timestamp_perv1_eof_addr   = 0x2ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+			cam_ife_csid_170_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+			cam_ife_csid_170_tpg_reg_offset = {
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+static struct cam_ife_csid_common_reg_offset
+			cam_ife_csid_170_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 3,
+	.no_pix                                       = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_170_csi2_reg_offset,
+	.ipp_reg          = &cam_ife_csid_170_ipp_reg_offset,
+	.rdi_reg = {
+		&cam_ife_csid_170_rdi_0_reg_offset,
+		&cam_ife_csid_170_rdi_1_reg_offset,
+		&cam_ife_csid_170_rdi_2_reg_offset,
+		NULL,
+		},
+	.tpg_reg = &cam_ife_csid_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
new file mode 100644
index 0000000..6306df3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -0,0 +1,2554 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_ife_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+
+/* Timeout value in msec */
+#define IFE_CSID_TIMEOUT                               1000
+
+/* TPG VC/DT values */
+#define CAM_IFE_CSID_TPG_VC_VAL                        0xA
+#define CAM_IFE_CSID_TPG_DT_VAL                        0x2B
+
+/* Timeout values in usec */
+#define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
+#define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
+
+static int cam_ife_csid_is_ipp_format_supported(
+				uint32_t decode_fmt)
+{
+	int rc = -EINVAL;
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_DPCM_10_6_10:
+	case CAM_FORMAT_DPCM_10_8_10:
+	case CAM_FORMAT_DPCM_12_6_12:
+	case CAM_FORMAT_DPCM_12_8_12:
+	case CAM_FORMAT_DPCM_14_8_14:
+	case CAM_FORMAT_DPCM_14_10_14:
+		rc = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
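+/*
+ * Translate the input decode format into the CSID path format and plain
+ * format register values. RDI paths always use path format 0xf; for the
+ * pixel path the plain format selects the unpacked output width
+ * (0 for RAW6/8, 1 for RAW10-RAW16 and the DPCM modes, 2 for RAW20).
+ */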
+static int cam_ife_csid_get_format(uint32_t  res_id,
+	uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
+		res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
+		*path_fmt = 0xf;
+		return 0;
+	}
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*path_fmt  = 0;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*path_fmt  = 1;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*path_fmt  = 2;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*path_fmt  = 3;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		*path_fmt  = 4;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		*path_fmt  = 5;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		*path_fmt  = 6;
+		*plain_fmt = 2;
+		break;
+	case CAM_FORMAT_DPCM_10_6_10:
+		*path_fmt  = 7;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_10_8_10:
+		*path_fmt  = 8;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_6_12:
+		*path_fmt  = 9;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_8_12:
+		*path_fmt  = 0xA;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_8_14:
+		*path_fmt  = 0xB;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_10_14:
+		*path_fmt  = 0xC;
+		*plain_fmt = 1;
+		break;
+	default:
+		pr_err("%s:%d: unsupported format %d\n",
+		__func__, __LINE__, decode_fmt);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
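+/*
+ * Find a CID resource that already serves the requested vc/dt (or the TPG
+ * CID when the input is the test pattern generator) and bump its use count;
+ * otherwise reserve the first available CID slot for this vc/dt.
+ */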
+static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
+	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
+	uint32_t res_type)
+{
+	int  rc = 0;
+	struct cam_ife_csid_cid_data    *cid_data;
+	uint32_t  i = 0, j = 0;
+
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		if (csid_hw->cid_res[i].res_state >=
+			CAM_ISP_RESOURCE_STATE_RESERVED) {
+			cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[i].res_priv;
+			if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+				if (cid_data->tpg_set) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			} else {
+				if (cid_data->vc == vc && cid_data->dt == dt) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (i == CAM_IFE_CSID_CID_RES_MAX) {
+		if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			pr_err("%s:%d:CSID:%d TPG CID not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+		}
+
+		for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
+			if (csid_hw->cid_res[j].res_state ==
+				CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+				cid_data = (struct cam_ife_csid_cid_data *)
+					csid_hw->cid_res[j].res_priv;
+				cid_data->vc  = vc;
+				cid_data->dt  = dt;
+				cid_data->cnt = 1;
+				csid_hw->cid_res[j].res_state =
+					CAM_ISP_RESOURCE_STATE_RESERVED;
+				*res = &csid_hw->cid_res[j];
+				CDBG("%s:%d:CSID:%d CID %d allocated\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					csid_hw->cid_res[j].res_id);
+				break;
+			}
+		}
+
+		if (j == CAM_IFE_CSID_CID_RES_MAX) {
+			pr_err("%s:%d:CSID:%d Free cid is not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+
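+/*
+ * Top-level CSID HW reset sequence: save and mask the rx/IPP/RDI interrupt
+ * masks, clear any pending interrupts, issue the reset strobe, wait for the
+ * reset-done completion signalled by the irq handler, then restore the saved
+ * interrupt masks.
+ */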
+static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
+{
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	int rc = 0;
+	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
+		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	init_completion(&csid_hw->csid_top_complete);
+
+	/* Save the interrupt mask register values */
+	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+	}
+
+	/* Mask all interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* enable the IPP and RDI format measure */
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);
+
+	/* perform the top CSID HW reset */
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
+		__func__, __LINE__);
+
+	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID:%d csid reset completion failed, rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	} else {
+		rc = 0;
+	}
+
+	/*restore all interrupt masks */
+	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_reset_cfg_args  *reset)
+{
+	int rc = 0;
+	struct cam_hw_soc_info              *soc_info;
+	struct cam_isp_resource_node        *res;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t  reset_strb_addr, reset_strb_val, val, id;
+	struct completion  *complete;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res      = reset->node_res;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		if (!csid_reg->ipp_reg) {
+			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
+		complete = &csid_hw->csid_ipp_complete;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	} else {
+		id = res->res_id;
+		if (!csid_reg->rdi_reg[id]) {
+			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr =
+			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+		complete =
+			&csid_hw->csid_rdin_complete[id];
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	init_completion(complete);
+	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
+
+	/* Enable the Test gen before reset */
+	cam_io_w_mb(1,	csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+	/* Reset the corresponding ife csid path */
+	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+				reset_strb_addr);
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id,  rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable Test Gen after reset*/
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+end:
+	return rc;
+
+}
+
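+/*
+ * Reserve a CID for the given in_port after validating the phy/lane/vc/dt
+ * (and, for TPG, the decode format). The first reservation latches the csi2
+ * rx and TPG configuration and takes CID 0; subsequent reservations must
+ * match the latched configuration and go through cam_ife_csid_cid_get().
+ */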
+static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
+{
+	int rc = 0;
+	struct cam_ife_csid_cid_data       *cid_data;
+
+	CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		cid_reserv->in_port->res_type,
+		cid_reserv->in_port->lane_type,
+		cid_reserv->in_port->lane_num,
+		cid_reserv->in_port->dt,
+		cid_reserv->in_port->vc);
+
+	if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
+		pr_err("%s:%d:CSID:%d  Invalid phy sel %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d  Invalid lane type %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
+		cid_reserv->in_port->lane_num > 4) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+	if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+		cid_reserv->in_port->lane_num > 3) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* CSID CSI2 v2.0 supports vc 0-31 and dt up to 0x3f */
+	if (cid_reserv->in_port->dt > 0x3f ||
+		cid_reserv->in_port->vc > 0x1f) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
+		(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 ||
+		cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
+		pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->format);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (csid_hw->csi2_reserve_cnt) {
+		/* the currently configured res type must match the requested one */
+		if (csid_hw->res_type != cid_reserv->in_port->res_type) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+			if (csid_hw->csi2_rx_cfg.lane_cfg !=
+				cid_reserv->in_port->lane_cfg  ||
+				csid_hw->csi2_rx_cfg.lane_type !=
+				cid_reserv->in_port->lane_type ||
+				csid_hw->csi2_rx_cfg.lane_num !=
+				cid_reserv->in_port->lane_num) {
+				rc = -EINVAL;
+				goto end;
+				}
+		} else {
+			if (csid_hw->tpg_cfg.decode_fmt !=
+				cid_reserv->in_port->format     ||
+				csid_hw->tpg_cfg.width !=
+				cid_reserv->in_port->left_width ||
+				csid_hw->tpg_cfg.height !=
+				cid_reserv->in_port->height     ||
+				csid_hw->tpg_cfg.test_pattern !=
+				cid_reserv->in_port->test_pattern) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	if (!csid_hw->csi2_reserve_cnt) {
+		csid_hw->res_type = cid_reserv->in_port->res_type;
+		/* Take the first CID resource*/
+		csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+		cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[0].res_priv;
+
+		csid_hw->csi2_rx_cfg.lane_cfg =
+			cid_reserv->in_port->lane_cfg;
+		csid_hw->csi2_rx_cfg.lane_type =
+			cid_reserv->in_port->lane_type;
+		csid_hw->csi2_rx_cfg.lane_num =
+			cid_reserv->in_port->lane_num;
+
+		if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			csid_hw->csi2_rx_cfg.phy_sel = 0;
+			if (cid_reserv->in_port->format >
+			    CAM_FORMAT_MIPI_RAW_16) {
+				pr_err("%s:%d: Wrong TPG format\n", __func__,
+					__LINE__);
+				rc = -EINVAL;
+				goto end;
+			}
+			csid_hw->tpg_cfg.decode_fmt =
+				cid_reserv->in_port->format;
+			csid_hw->tpg_cfg.width =
+				cid_reserv->in_port->left_width;
+			csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
+			csid_hw->tpg_cfg.test_pattern =
+				cid_reserv->in_port->test_pattern;
+			cid_data->tpg_set = 1;
+		} else {
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->in_port->res_type & 0xFF) - 1;
+		}
+
+		cid_data->vc = cid_reserv->in_port->vc;
+		cid_data->dt = cid_reserv->in_port->dt;
+		cid_data->cnt = 1;
+		cid_reserv->node_res = &csid_hw->cid_res[0];
+		csid_hw->csi2_reserve_cnt++;
+
+		CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->node_res->res_id);
+	} else {
+		rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt,
+			cid_reserv->in_port->res_type);
+		/* if success then increment the reserve count */
+		if (!rc) {
+			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+				pr_err("%s:%d:CSID%d reserve cnt reached max\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx);
+				rc = -EINVAL;
+			} else {
+				csid_hw->csi2_reserve_cnt++;
+				CDBG("%s:%d:CSID:%d CID:%d acquired\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					cid_reserv->node_res->res_id);
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+
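+/*
+ * Reserve the IPP or an RDI path and record its per-path configuration
+ * (cid, decode format, sync mode, frame geometry). Crop is enabled only for
+ * the dual-IFE master/slave modes, using the left/right split described by
+ * the in_port.
+ */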
+static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *reserve)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg    *path_data;
+	struct cam_isp_resource_node    *res;
+
+	/* CSID CSI2 v2.0 supports vc 0-31 and dt up to 0x3f */
+	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
+		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			reserve->in_port->vc, reserve->in_port->dt,
+			reserve->sync_mode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				csid_hw->ipp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cam_ife_csid_is_ipp_format_supported(
+				reserve->in_port->format)) {
+			pr_err("%s:%d:CSID:%d res id:%d unsupported format %d\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* assign the IPP resource */
+		res = &csid_hw->ipp_res;
+		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+			break;
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (csid_hw->rdi_res[reserve->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				reserve->res_id,
+				csid_hw->rdi_res[reserve->res_id].res_state);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			res = &csid_hw->rdi_res[reserve->res_id];
+			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+		}
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, reserve->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	path_data = (struct cam_ife_csid_path_cfg   *)res->res_priv;
+
+	path_data->cid = reserve->cid;
+	path_data->decode_fmt = reserve->in_port->format;
+	path_data->master_idx = reserve->master_idx;
+	path_data->sync_mode = reserve->sync_mode;
+	path_data->height  = reserve->in_port->height;
+	path_data->start_line = reserve->in_port->line_start;
+	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
+		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
+	} else {
+		path_data->dt = reserve->in_port->dt;
+		path_data->vc = reserve->in_port->vc;
+	}
+
+	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->width  = reserve->in_port->left_width;
+	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->width  = reserve->in_port->right_width;
+	} else
+		path_data->crop_enable = 0;
+
+	reserve->node_res = res;
+
+end:
+	return rc;
+}
+
+static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw  *csid_hw)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t i, status, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* overflow check before increment */
+	if (csid_hw->hw_info->open_count == UINT_MAX) {
+		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	csid_hw->hw_info->open_count++;
+	if (csid_hw->hw_info->open_count > 1) {
+		CDBG("%s:%d: CSID hw has already been enabled\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	rc = cam_ife_csid_enable_soc_resources(soc_info);
+	if (rc) {
+		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+
+	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_global_reset(csid_hw);
+	if (rc) {
+		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
+			 __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/*
+	 * Reset the SW registers
+	 * SW register reset also reset the mask irq, so poll the irq status
+	 * to check the reset complete.
+	 */
+	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		pr_err("%s:%d: software register reset timed out\n",
+			__func__, __LINE__);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_hw_version_addr);
+	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, val);
+
+	return 0;
+
+disable_soc:
+	cam_ife_csid_disable_soc_resources(soc_info);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+	csid_hw->hw_info->open_count--;
+	return rc;
+}
+
+static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = 0;
+	struct cam_hw_soc_info             *soc_info;
+	struct cam_ife_csid_reg_offset     *csid_reg;
+
+
+	/*  Decrement ref Count */
+	if (csid_hw->hw_info->open_count)
+		csid_hw->hw_info->open_count--;
+	if (csid_hw->hw_info->open_count)
+		return rc;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	/*disable the top IRQ interrupt */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_disable_soc_resources(soc_info);
+	if (rc)
+		pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	return rc;
+}
+
+
+static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	uint32_t  val = 0;
+	struct cam_hw_soc_info    *soc_info;
+
+	csid_hw->tpg_start_cnt++;
+	if (csid_hw->tpg_start_cnt == 1) {
+		/*Enable the TPG */
+		CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+		soc_info = &csid_hw->hw_info->soc_info;
+		{
+			uint32_t val;
+			uint32_t i;
+			uint32_t base = 0x600;
+
+			CDBG("%s:%d: ================== TPG ===============\n",
+				__func__, __LINE__);
+			for (i = 0; i < 16; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== IPP ===============\n",
+				__func__, __LINE__);
+			base = 0x200;
+			for (i = 0; i < 10; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== RX ===============\n",
+				__func__, __LINE__);
+			base = 0x100;
+			for (i = 0; i < 5; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+		}
+
+		CDBG("%s:%d: =============== TPG control ===============\n",
+			__func__, __LINE__);
+		val = (4 << 20);
+		val |= (0x80 << 8);
+		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
+		val |= 7;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_hw->csid_info->csid_reg->tpg_reg->
+			csid_tpg_ctrl_addr);
+
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
+		CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
+			0x600, val);
+	}
+
+	return 0;
+}
+
+static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_hw_soc_info    *soc_info;
+
+	if (csid_hw->tpg_start_cnt)
+		csid_hw->tpg_start_cnt--;
+
+	if (csid_hw->tpg_start_cnt)
+		return 0;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* disable the TPG */
+	if (!csid_hw->tpg_start_cnt) {
+		CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+		/*stop the TPG */
+		cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+	}
+
+	return 0;
+}
+
+
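+/*
+ * Program the test pattern generator: a single VC/DT with infinite frames,
+ * fixed blanking, a fixed LFSR seed, the frame geometry and decode format
+ * from tpg_cfg, and the requested test pattern with a 5-frame color bar
+ * rotate period.
+ */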
+static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_ife_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info         *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	CDBG("%s:%d CSID:%d TPG config\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx);
+
+	/* configure one DT, infinite frames */
+	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
+
+	/* vertical blanking count = 0x740, horizontal blanking count = 0x740 */
+	val = (0x740 << 12) | 0x740;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
+
+	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);
+
+	val = csid_hw->tpg_cfg.width << 16 |
+		csid_hw->tpg_cfg.height;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);
+
+	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);
+
+	/*
+	 * decode_fmt is the same as the input resource format;
+	 * it is one larger than the value in the register spec.
+	 */
+	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
+
+	/* select rotate period as  5 frame */
+	/* select the rotate period as 5 frames */
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
+	/* config pix pattern */
+	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);
+
+	return 0;
+}
+
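+/*
+ * Reference-counted csi2 rx enable: only the first user programs rx cfg0/cfg1
+ * (lane count, lane mapping, lane type, phy select, VC mode and ECC
+ * correction), configures the TPG when that is the input and unmasks the rx
+ * reset/error interrupts.
+ */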
+static int cam_ife_csid_enable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_cid_data         *cid_data;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	/* overflow check before increment */
+		pr_err("%s:%d:CSID:%d csi2 config count reached max\n", __func__,
+		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;
+
+	res->res_state  = CAM_ISP_RESOURCE_STATE_STREAMING;
+	csid_hw->csi2_cfg_cnt++;
+	if (csid_hw->csi2_cfg_cnt > 1)
+		return rc;
+
+	/* rx cfg0 */
+	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
+		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+		(csid_hw->csi2_rx_cfg.lane_type << 24);
+	val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+	/* rx cfg1*/
+	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+	/* if the VC value is more than 3 then set the full width of VC */
+	if (cid_data->vc > 3)
+		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
+	/* enable packet ecc correction */
+	val |= 1;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		/* Config the TPG */
+		rc = cam_ife_csid_config_tpg(csid_hw, res);
+		if (rc) {
+			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+			return rc;
+		}
+	}
+
+	/* Enable the CSI2 rx interrupts */
+	val = CSID_CSI2_RX_INFO_RST_DONE |
+		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
+		pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	if (csid_hw->csi2_cfg_cnt)
+		csid_hw->csi2_cfg_cnt--;
+
+	if (csid_hw->csi2_cfg_cnt)
+		return 0;
+
+	/* Disable the CSI2 rx interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
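+/*
+ * Program the IPP path: cfg0 carries the vc/dt/cid, decode format, crop
+ * enables and timestamp/measure enables; then the crop windows, the
+ * pass-through frame/irq/pixel/line drop patterns, the halt mode
+ * (master/slave/internal) and finally the path enable bit are configured.
+ */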
+static int cam_ife_csid_init_config_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_format = 0, val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg  *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_format);
+	if (rc)
+		return rc;
+
+	/*
+	 * Configure the IPP and enable the time stamp capture.
+	 * Enable the HW measurement blocks.
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		((path_data->crop_enable & 1) <<
+		csid_reg->cmn_reg->crop_h_en_shift_val) |
+		((path_data->crop_enable & 1) <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 1) | 1;
+	val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	if (path_data->crop_enable) {
+		val = (((path_data->width +
+			path_data->start_pixel) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_hcrop_addr);
+
+		val = (((path_data->height +
+			path_data->start_line) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_vcrop_addr);
+	}
+
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
+	/* set irq sub sample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
+
+	/*Set master or slave IPP */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/*Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/*Set halt mode as slave and set master idx */
+		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* Enable the IPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		rc = -EINVAL;
+	}
+
+	/* Disable the IPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info            *soc_info;
+	struct cam_ife_csid_path_cfg      *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+
+	/*Resume at frame boundary */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
+		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	}
+	/* for slave mode, there is no need to resume the slave device */
+
+	/* Enable the required ipp interrupts */
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd       stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_path_cfg         *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d unsupported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		/* configure Halt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val &= ~0x3;
+		val |= stop_cmd;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+		cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* For slave mode, the halt command is taken from the master */
+
+	/* Enable the EOF interrupt so the halt at frame boundary completes */
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		init_completion(&csid_hw->csid_ipp_complete);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	}
+
+	return rc;
+}
+
+
+static int cam_ife_csid_init_config_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	id = res->res_id;
+	if (!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+		return -EINVAL;
+	}
+
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_fmt);
+	if (rc)
+		return rc;
+
+	/*
+	 * RDI path config: enable the time stamp capture and the
+	 * format measurement blocks.
+	 */
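+	/*
+	 * cfg0 packs the VC, DT, DT_ID (cid), the decode and plain formats
+	 * and the crop enables; the literal low bits presumably carry the
+	 * timestamp capture and measurement enables noted above.
+	 */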
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+		((path_data->crop_enable & 1) <<
+			csid_reg->cmn_reg->crop_h_en_shift_val) |
+		((path_data->crop_enable & 1) <<
+			csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 2) | 3;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	if (path_data->crop_enable) {
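+		/*
+		 * The crop registers pack the end coordinate (start + size)
+		 * above crop_shift and the start coordinate in the low
+		 * 16 bits.
+		 */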
+		val = (((path_data->width +
+			path_data->start_pixel) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+
+		val = (((path_data->height +
+			path_data->start_line) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+	}
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
+	/* set IRQ subsample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
+
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
+
+	/* Configure the halt mode */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the RDI path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	/* Disable the RDI path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t id, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	/* Resume at frame boundary */
+	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the required RDI interrupts */
+	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+
+static int cam_ife_csid_disable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd                stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
+		!csid_reg->rdi_reg[res->res_id]) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d unsupported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	init_completion(&csid_hw->csid_rdin_complete[id]);
+
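+	/*
+	 * For a halt at frame boundary the EOF interrupt stays enabled so
+	 * the IRQ handler can signal csid_rdin_complete once the frame
+	 * drains; an immediate halt masks the remaining path interrupts.
+	 */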
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	} else {
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	/* Halt the RDI path */
+	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_get_time_stamp(
+		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
+{
+	struct cam_csid_get_time_stamp_args  *time_stamp;
+	struct cam_isp_resource_node         *res;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  time_32, id;
+
+	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
+	res = time_stamp->node_res;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
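+	/*
+	 * The 64-bit SOF timestamp is assembled from two 32-bit registers:
+	 * curr1 holds the upper word and curr0 the lower word.
+	 */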
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	} else {
+		id = res->res_id;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	}
+
+	return 0;
+}
+static int cam_ife_csid_res_wait_for_halt(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	struct completion  *complete;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+		complete = &csid_hw->csid_ipp_complete;
+	else
+		complete =  &csid_hw->csid_rdin_complete[res->res_id];
+
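+	/*
+	 * The completion is signalled from the IRQ handler on the path EOF
+	 * (or reset done) event, after the halt at frame boundary command
+	 * was issued by the disable path.
+	 */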
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID%d frame boundary halt failed id:%d rc:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, rc);
+		if (rc == 0)
+			/* continue even on timeout */
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable the interrupt */
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		id = res->res_id;
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+			CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+	/* set state to init HW */
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	return rc;
+}
+
+static int cam_ife_csid_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw_caps     *hw_caps;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
+
+	hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
+	hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
+	hw_caps->major_version = csid_reg->cmn_reg->major_version;
+	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+	CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+		hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
+		hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_ife_csid_reset(void *hw_priv,
+	void *reset_args, uint32_t arg_size)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_csid_reset_cfg_args  *reset;
+	int rc = 0;
+
+	if (!hw_priv || !reset_args || (arg_size !=
+		sizeof(struct cam_csid_reset_cfg_args))) {
+		pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reset   = (struct cam_csid_reset_cfg_args  *)reset_args;
+
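+	/*
+	 * A global reset covers the whole CSID block, while a path reset
+	 * targets a single IPP/RDI path.
+	 */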
+	switch (reset->reset_type) {
+	case CAM_IFE_CSID_RESET_GLOBAL:
+		rc = cam_ife_csid_global_reset(csid_hw);
+		break;
+	case CAM_IFE_CSID_RESET_PATH:
+		rc = cam_ife_csid_path_reset(csid_hw, reset);
+		break;
+	default:
+		pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
+			__LINE__, reset->reset_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_csid_hw_reserve_resource_args  *reserv;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_csid_hw_reserve_resource_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reserv = (struct cam_csid_hw_reserve_resource_args  *)reserve_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (reserv->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		rc = cam_ife_csid_path_reserve(csid_hw, reserv);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_isp_resource_node    *res;
+	struct cam_ife_csid_cid_data    *cid_data;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		goto end;
+	}
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		cid_data = (struct cam_ife_csid_cid_data    *) res->res_priv;
+		if (cid_data->cnt)
+			cid_data->cnt--;
+
+		if (!cid_data->cnt)
+			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		if (csid_hw->csi2_reserve_cnt)
+			csid_hw->csi2_reserve_cnt--;
+
+		if (!csid_hw->csi2_reserve_cnt)
+			memset(&csid_hw->csi2_rx_cfg, 0,
+				sizeof(struct cam_ife_csid_csi2_rx_cfg));
+
+		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		break;
+	}
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res      = (struct cam_isp_resource_node *)init_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+
+	/* Initialize the csid hardware */
+	rc = cam_ife_csid_enable_hw(csid_hw);
+	if (rc)
+		goto end;
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_enable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+
+	if (rc)
+		cam_ife_csid_disable_hw(csid_hw);
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	res = (struct cam_isp_resource_node *)deinit_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		goto end;
+	}
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_disable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		goto end;
+	}
+
+	/* Disable CSID HW */
+	cam_ife_csid_disable_hw(csid_hw);
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)start_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CDBG("%s:%d:CSID:%d Invalid res type:%d res id:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		if (csid_hw->res_type ==  CAM_ISP_IFE_IN_RES_TPG)
+			rc = cam_ife_csid_tpg_start(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res;
+	struct cam_csid_hw_stop_args         *csid_stop;
+	uint32_t  i;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	/* Stop the resource first */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_CID:
+			if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+				rc = cam_ife_csid_tpg_stop(csid_hw, res);
+			break;
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+				rc = cam_ife_csid_disable_ipp_path(csid_hw,
+						res, csid_stop->stop_cmd);
+			else
+				rc = cam_ife_csid_disable_rdi_path(csid_hw,
+						res, csid_stop->stop_cmd);
+
+			break;
+		default:
+			pr_err("%s:%d:CSID:%d Invalid res type %d\n", __func__,
+				__LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
+	/* Wait for the paths to halt */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
+			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+	}
+
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+
+}
+
+static int cam_ife_csid_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: unsupported\n", __func__, __LINE__);
+
+	return -EINVAL;
+}
+
+static int cam_ife_csid_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: unsupported\n", __func__, __LINE__);
+	return -EINVAL;
+}
+
+static int cam_ife_csid_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+
+	if (!hw_priv || !cmd_args) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (cmd_type) {
+	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
+		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d unsupported cmd:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+
+	return rc;
+
+}
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
+		irq_status_rdi[4];
+
+	if (!data) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	csid_hw = (struct cam_ife_csid_hw *)data;
+
+	CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
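+	/*
+	 * Latch all status registers, write them back to the corresponding
+	 * clear registers and write the IRQ cmd register before dispatching
+	 * the events below.
+	 */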
+	/* read */
+	irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+	irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_status_addr);
+
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+	/* clear */
+	cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
+		irq_status_rx);
+	CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
+		irq_status_ipp);
+
+	if (irq_status_top) {
+		CDBG("%s:%d: CSID global reset complete, exiting\n",
+			__func__, __LINE__);
+		complete(&csid_hw->csid_top_complete);
+		return IRQ_HANDLED;
+	}
+
+
+	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
+		CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+		complete(&csid_hw->csid_csi2_complete);
+	}
+
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 0 FIFO overflow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 1 FIFO overflow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 2 FIFO overflow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 3 FIFO overflow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d TG FIFO overflow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+
+	/* Read the IPP errors */
+	if (csid_reg->cmn_reg->no_pix) {
+		/* IPP reset done bit */
+		if (irq_status_ipp &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s:%d: CSID IPP reset complete\n",
+				__func__, __LINE__);
+			complete(&csid_hw->csid_ipp_complete);
+		}
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
+			CDBG("%s: CSID IPP SOF received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
+			CDBG("%s: CSID IPP SOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
+			CDBG("%s: CSID IPP EOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			CDBG("%s: CSID IPP EOF received\n", __func__);
+
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_ipp_complete);
+
+		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/* Stop the IPP path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		if (irq_status_rdi[i] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s:%d: CSID rdi%d reset complete\n",
+				__func__, __LINE__, i);
+			complete(&csid_hw->csid_rdin_complete[i]);
+		}
+
+		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_rdin_complete[i]);
+
+		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/* Stop the RDI path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+		}
+	}
+
+	CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+	return IRQ_HANDLED;
+}
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	struct cam_ife_csid_path_cfg         *path_data;
+	struct cam_ife_csid_cid_data         *cid_data;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_ife_csid_hw               *ife_csid_hw = NULL;
+
+	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
+		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
+			csid_idx);
+		return rc;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
+	ife_csid_hw  = (struct cam_ife_csid_hw  *) csid_hw_info->core_info;
+
+	ife_csid_hw->hw_intf = csid_hw_intf;
+	ife_csid_hw->hw_info = csid_hw_info;
+
+	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+		ife_csid_hw->hw_intf->hw_type, csid_idx);
+
+
+	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
+	init_completion(&ife_csid_hw->hw_info->hw_complete);
+
+	init_completion(&ife_csid_hw->csid_top_complete);
+	init_completion(&ife_csid_hw->csid_csi2_complete);
+	init_completion(&ife_csid_hw->csid_ipp_complete);
+	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
+		init_completion(&ife_csid_hw->csid_rdin_complete[i]);
+
+
+	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
+			cam_ife_csid_irq, ife_csid_hw);
+	if (rc < 0) {
+		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
+			csid_idx);
+		goto err;
+	}
+
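+	/* Hook the generic cam_hw_intf operations to the CSID handlers */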
+	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
+	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
+	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
+	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
+	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
+	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
+	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
+	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
+	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
+	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
+	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
+
+	/* Initialize the CID resources */
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
+		ife_csid_hw->cid_res[i].res_id = i;
+		ife_csid_hw->cid_res[i].res_state  =
+					CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
+					GFP_KERNEL);
+		if (!cid_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->cid_res[i].res_priv = cid_data;
+	}
+
+	/* Initialize the IPP resources */
+	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
+		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+		ife_csid_hw->ipp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->ipp_res.res_priv = path_data;
+	}
+
+	/* Initialize the RDI resource */
+	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+				i++) {
+		/* res id runs from RDI0 to RDI3 */
+		ife_csid_hw->rdi_res[i].res_type =
+			CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->rdi_res[i].res_id = i;
+		ife_csid_hw->rdi_res[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->rdi_res[i].res_priv = path_data;
+	}
+
+	return 0;
+err:
+	if (rc) {
+		kfree(ife_csid_hw->ipp_res.res_priv);
+		for (i = 0; i <
+			ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
+			kfree(ife_csid_hw->rdi_res[i].res_priv);
+
+		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+			kfree(ife_csid_hw->cid_res[i].res_priv);
+
+	}
+
+	return rc;
+}
+
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+
+	if (!ife_csid_hw) {
+		pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+		return rc;
+	}
+
+	/* release the private data memory of the resources */
+	kfree(ife_csid_hw->ipp_res.res_priv);
+	for (i = 0; i <
+		ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+		i++) {
+		kfree(ife_csid_hw->rdi_res[i].res_priv);
+	}
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+		kfree(ife_csid_hw->cid_res[i].res_priv);
+
+
+	return 0;
+}
+
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
new file mode 100644
index 0000000..d36c576
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -0,0 +1,419 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_HW_H_
+#define _CAM_IFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_ife_csid_soc.h"
+
+#define CAM_IFE_CSID_HW_RES_MAX      4
+#define CAM_IFE_CSID_CID_RES_MAX     4
+#define CAM_IFE_CSID_RDI_MAX         4
+
+#define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
+#define CSID_CSI2_RX_NFO_PHY_DL1_EOT_CAPTURED     BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
+#define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
+#define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
+#define CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED    BIT(5)
+#define CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED    BIT(6)
+#define CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED    BIT(7)
+#define CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED       BIT(8)
+#define CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED      BIT(9)
+#define CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED   BIT(10)
+#define CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION     BIT(11)
+#define CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION     BIT(12)
+#define CSID_CSI2_RX_ERROR_CPHY_PH_CRC            BIT(13)
+#define CSID_CSI2_RX_WARNING_ECC                  BIT(14)
+#define CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW    BIT(15)
+#define CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW    BIT(16)
+#define CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW    BIT(17)
+#define CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW    BIT(18)
+#define CSID_CSI2_RX_ERROR_CRC                    BIT(19)
+#define CSID_CSI2_RX_ERROR_ECC                    BIT(20)
+#define CSID_CSI2_RX_ERROR_MMAPPED_VC_DT          BIT(21)
+#define CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT         BIT(22)
+#define CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW       BIT(23)
+#define CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME        BIT(24)
+#define CSID_CSI2_RX_INFO_TG_DONE                 BIT(25)
+#define CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW       BIT(26)
+#define CSID_CSI2_RX_INFO_RST_DONE                BIT(27)
+
+#define CSID_PATH_INFO_RST_DONE                   BIT(1)
+#define CSID_PATH_ERROR_FIFO_OVERFLOW             BIT(2)
+#define CSID_PATH_INFO_SUBSAMPLED_EOF             BIT(3)
+#define CSID_PATH_INFO_SUBSAMPLED_SOF             BIT(4)
+#define CSID_PATH_INFO_FRAME_DROP_EOF             BIT(5)
+#define CSID_PATH_INFO_FRAME_DROP_EOL             BIT(6)
+#define CSID_PATH_INFO_FRAME_DROP_SOL             BIT(7)
+#define CSID_PATH_INFO_FRAME_DROP_SOF             BIT(8)
+#define CSID_PATH_INFO_INPUT_EOF                  BIT(9)
+#define CSID_PATH_INFO_INPUT_EOL                  BIT(10)
+#define CSID_PATH_INFO_INPUT_SOL                  BIT(11)
+#define CSID_PATH_INFO_INPUT_SOF                  BIT(12)
+#define CSID_PATH_ERROR_PIX_COUNT                 BIT(13)
+#define CSID_PATH_ERROR_LINE_COUNT                BIT(14)
+
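+/*
+ * Halt mode selects how a path is stopped: internally by the path, via
+ * the global halt, or as a master/slave pair where the slave follows
+ * the master.
+ */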
+enum cam_csid_path_halt_mode {
+	CSID_HALT_MODE_INTERNAL,
+	CSID_HALT_MODE_GLOBAL,
+	CSID_HALT_MODE_MASTER,
+	CSID_HALT_MODE_SLAVE,
+};
+
+
+struct cam_ife_csid_ipp_reg_offset {
+	/*Image pixel path register offsets*/
+	uint32_t csid_ipp_irq_status_addr;
+	uint32_t csid_ipp_irq_mask_addr;
+	uint32_t csid_ipp_irq_clear_addr;
+	uint32_t csid_ipp_irq_set_addr;
+
+	uint32_t csid_ipp_cfg0_addr;
+	uint32_t csid_ipp_cfg1_addr;
+	uint32_t csid_ipp_ctrl_addr;
+	uint32_t csid_ipp_frm_drop_pattern_addr;
+	uint32_t csid_ipp_frm_drop_period_addr;
+	uint32_t csid_ipp_irq_subsample_pattern_addr;
+	uint32_t csid_ipp_irq_subsample_period_addr;
+	uint32_t csid_ipp_hcrop_addr;
+	uint32_t csid_ipp_vcrop_addr;
+	uint32_t csid_ipp_pix_drop_pattern_addr;
+	uint32_t csid_ipp_pix_drop_period_addr;
+	uint32_t csid_ipp_line_drop_pattern_addr;
+	uint32_t csid_ipp_line_drop_period_addr;
+	uint32_t csid_ipp_rst_strobes_addr;
+	uint32_t csid_ipp_status_addr;
+	uint32_t csid_ipp_misr_val_addr;
+	uint32_t csid_ipp_format_measure_cfg0_addr;
+	uint32_t csid_ipp_format_measure_cfg1_addr;
+	uint32_t csid_ipp_format_measure0_addr;
+	uint32_t csid_ipp_format_measure1_addr;
+	uint32_t csid_ipp_format_measure2_addr;
+	uint32_t csid_ipp_timestamp_curr0_sof_addr;
+	uint32_t csid_ipp_timestamp_curr1_sof_addr;
+	uint32_t csid_ipp_timestamp_perv0_sof_addr;
+	uint32_t csid_ipp_timestamp_perv1_sof_addr;
+	uint32_t csid_ipp_timestamp_curr0_eof_addr;
+	uint32_t csid_ipp_timestamp_curr1_eof_addr;
+	uint32_t csid_ipp_timestamp_perv0_eof_addr;
+	uint32_t csid_ipp_timestamp_perv1_eof_addr;
+
+	/* configuration */
+	uint32_t  pix_store_en_shift_val;
+};
+
+struct cam_ife_csid_rdi_reg_offset {
+	uint32_t csid_rdi_irq_status_addr;
+	uint32_t csid_rdi_irq_mask_addr;
+	uint32_t csid_rdi_irq_clear_addr;
+	uint32_t csid_rdi_irq_set_addr;
+
+	/*RDI N register address */
+	uint32_t csid_rdi_cfg0_addr;
+	uint32_t csid_rdi_cfg1_addr;
+	uint32_t csid_rdi_ctrl_addr;
+	uint32_t csid_rdi_frm_drop_pattern_addr;
+	uint32_t csid_rdi_frm_drop_period_addr;
+	uint32_t csid_rdi_irq_subsample_pattern_addr;
+	uint32_t csid_rdi_irq_subsample_period_addr;
+	uint32_t csid_rdi_rpp_hcrop_addr;
+	uint32_t csid_rdi_rpp_vcrop_addr;
+	uint32_t csid_rdi_rpp_pix_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_pix_drop_period_addr;
+	uint32_t csid_rdi_rpp_line_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_line_drop_period_addr;
+	uint32_t csid_rdi_yuv_chroma_conversion_addr;
+	uint32_t csid_rdi_rst_strobes_addr;
+	uint32_t csid_rdi_status_addr;
+	uint32_t csid_rdi_misr_val0_addr;
+	uint32_t csid_rdi_misr_val1_addr;
+	uint32_t csid_rdi_misr_val2_addr;
+	uint32_t csid_rdi_misr_val3_addr;
+	uint32_t csid_rdi_format_measure_cfg0_addr;
+	uint32_t csid_rdi_format_measure_cfg1_addr;
+	uint32_t csid_rdi_format_measure0_addr;
+	uint32_t csid_rdi_format_measure1_addr;
+	uint32_t csid_rdi_format_measure2_addr;
+	uint32_t csid_rdi_timestamp_curr0_sof_addr;
+	uint32_t csid_rdi_timestamp_curr1_sof_addr;
+	uint32_t csid_rdi_timestamp_prev0_sof_addr;
+	uint32_t csid_rdi_timestamp_prev1_sof_addr;
+	uint32_t csid_rdi_timestamp_curr0_eof_addr;
+	uint32_t csid_rdi_timestamp_curr1_eof_addr;
+	uint32_t csid_rdi_timestamp_prev0_eof_addr;
+	uint32_t csid_rdi_timestamp_prev1_eof_addr;
+	uint32_t csid_rdi_byte_cntr_ping_addr;
+	uint32_t csid_rdi_byte_cntr_pong_addr;
+};
+
+struct cam_ife_csid_csi2_rx_reg_offset {
+	uint32_t csid_csi2_rx_irq_status_addr;
+	uint32_t csid_csi2_rx_irq_mask_addr;
+	uint32_t csid_csi2_rx_irq_clear_addr;
+	uint32_t csid_csi2_rx_irq_set_addr;
+	uint32_t csid_csi2_rx_cfg0_addr;
+	uint32_t csid_csi2_rx_cfg1_addr;
+	uint32_t csid_csi2_rx_capture_ctrl_addr;
+	uint32_t csid_csi2_rx_rst_strobes_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg0_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg1_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_lane0_misr_addr;
+	uint32_t csid_csi2_rx_lane1_misr_addr;
+	uint32_t csid_csi2_rx_lane2_misr_addr;
+	uint32_t csid_csi2_rx_lane3_misr_addr;
+	uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+	uint32_t csid_csi2_rx_stats_ecc_addr;
+	uint32_t csid_csi2_rx_total_crc_err_addr;
+
+	/*configurations */
+	uint32_t csi2_rst_srb_all;
+	uint32_t csi2_rst_done_shift_val;
+	uint32_t csi2_irq_mask_all;
+	uint32_t csi2_misr_enable_shift_val;
+	uint32_t csi2_vc_mode_shift_val;
+};
+
+struct cam_ife_csid_csi2_tpg_reg_offset {
+	uint32_t csid_tpg_ctrl_addr;
+	uint32_t csid_tpg_vc_cfg0_addr;
+	uint32_t csid_tpg_vc_cfg1_addr;
+	uint32_t csid_tpg_lfsr_seed_addr;
+	uint32_t csid_tpg_dt_n_cfg_0_addr;
+	uint32_t csid_tpg_dt_n_cfg_1_addr;
+	uint32_t csid_tpg_dt_n_cfg_2_addr;
+	uint32_t csid_tpg_color_bars_cfg_addr;
+	uint32_t csid_tpg_color_box_cfg_addr;
+	uint32_t csid_tpg_common_gen_cfg_addr;
+	uint32_t csid_tpg_cgen_n_cfg_addr;
+	uint32_t csid_tpg_cgen_n_x0_addr;
+	uint32_t csid_tpg_cgen_n_x1_addr;
+	uint32_t csid_tpg_cgen_n_x2_addr;
+	uint32_t csid_tpg_cgen_n_xy_addr;
+	uint32_t csid_tpg_cgen_n_y1_addr;
+	uint32_t csid_tpg_cgen_n_y2_addr;
+
+	/*configurations */
+	uint32_t tpg_dtn_cfg_offset;
+	uint32_t tpg_cgen_cfg_offset;
+
+};
+struct cam_ife_csid_common_reg_offset {
+	/* MIPI CSID registers */
+	uint32_t csid_hw_version_addr;
+	uint32_t csid_cfg0_addr;
+	uint32_t csid_ctrl_addr;
+	uint32_t csid_reset_addr;
+	uint32_t csid_rst_strobes_addr;
+
+	uint32_t csid_test_bus_ctrl_addr;
+	uint32_t csid_top_irq_status_addr;
+	uint32_t csid_top_irq_mask_addr;
+	uint32_t csid_top_irq_clear_addr;
+	uint32_t csid_top_irq_set_addr;
+	uint32_t csid_irq_cmd_addr;
+
+	/*configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t no_rdis;
+	uint32_t no_pix;
+	uint32_t csid_rst_stb;
+	uint32_t csid_rst_stb_sw_all;
+	uint32_t path_rst_stb_all;
+	uint32_t path_rst_done_shift_val;
+	uint32_t path_en_shift_val;
+	uint32_t dt_id_shift_val;
+	uint32_t vc_shift_val;
+	uint32_t dt_shift_val;
+	uint32_t fmt_shift_val;
+	uint32_t plain_fmt_shit_val;
+	uint32_t crop_v_en_shift_val;
+	uint32_t crop_h_en_shift_val;
+	uint32_t crop_shift;
+	uint32_t ipp_irq_mask_all;
+	uint32_t rdi_irq_mask_all;
+};
+
+/**
+ * struct cam_ife_csid_reg_offset- CSID instance register info
+ *
+ * @cmn_reg:  csid common registers info
+ * @csi2_reg: csi2 rx register offset information
+ * @ipp_reg:  ipp register offset information
+ * @rdi_reg:  rdi register offset information
+ * @tpg_reg:  tpg register offset information
+ *
+ */
+struct cam_ife_csid_reg_offset {
+	struct cam_ife_csid_common_reg_offset   *cmn_reg;
+	struct cam_ife_csid_csi2_rx_reg_offset  *csi2_reg;
+	struct cam_ife_csid_ipp_reg_offset      *ipp_reg;
+	struct cam_ife_csid_rdi_reg_offset      *rdi_reg[CAM_IFE_CSID_RDI_MAX];
+	struct cam_ife_csid_csi2_tpg_reg_offset *tpg_reg;
+};
+
+
+/**
+ * struct cam_ife_csid_hw_info- CSID HW info
+ *
+ * @csid_reg:        csid register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ *
+ */
+struct cam_ife_csid_hw_info {
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t                             hw_dts_version;
+	uint32_t                             csid_max_clk;
+
+};
+
+
+
+/**
+ * struct cam_ife_csid_csi2_rx_cfg- csid csi2 rx configuration data
+ * @phy_sel:     input resource type for sensor only
+ * @lane_type:   lane type: c-phy or d-phy
+ * @lane_num :   active lane number
+ * @lane_cfg:    lane configurations: 4 bits per lane
+ *
+ */
+struct cam_ife_csid_csi2_rx_cfg  {
+	uint32_t                        phy_sel;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+};
+
+/**
+ * struct cam_ife_csid_tpg_cfg- csid tpg configuration data
+ * @width:            width
+ * @height:           height
+ * @test_pattern:     test pattern
+ * @decode_fmt:       decode format
+ *
+ */
+struct cam_ife_csid_tpg_cfg  {
+	uint32_t                        width;
+	uint32_t                        height;
+	uint32_t                        test_pattern;
+	uint32_t                        decode_fmt;
+};
+
+/**
+ * struct cam_ife_csid_cid_data- cid configuration private data
+ *
+ * @vc:      virtual channel
+ * @dt:      Data type
+ * @cnt:     cid resource reference count.
+ * @tpg_set: tpg used for this cid resource
+ *
+ */
+struct cam_ife_csid_cid_data {
+	uint32_t                     vc;
+	uint32_t                     dt;
+	uint32_t                     cnt;
+	uint32_t                     tpg_set;
+};
+
+
+/**
+ * struct cam_ife_csid_path_cfg- csid path configuration details. It is stored
+ *                          as private data for IPP/ RDI paths
+ * @vc :            Virtual channel number
+ * @dt :            Data type number
+ * @cid:            cid number, it is the same as the DT_ID number in HW
+ * @decode_fmt:     input decode format
+ * @crop_enable:    crop is enable or disabled, if enabled
+ *                  then remaining parameters are valid.
+ * @start_pixel:    start pixel
+ * @width:          width
+ * @start_line:     start line
+ * @height:         height
+ * @sync_mode:      Applicable for IPP/RDI path reservation.
+ *                  Reserve the path as master IPP (value 1) or
+ *                  slave IPP (value 2); for RDI, set the mode to none.
+ * @master_idx:     For Slave reservation, Give master IFE instance Index.
+ *                  Slave will synchronize with master Start and stop operations
+ *
+ */
+struct cam_ife_csid_path_cfg {
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        cid;
+	uint32_t                        decode_fmt;
+	bool                            crop_enable;
+	uint32_t                        start_pixel;
+	uint32_t                        width;
+	uint32_t                        start_line;
+	uint32_t                        height;
+	enum cam_isp_hw_sync_mode       sync_mode;
+	uint32_t                        master_idx;
+};
+
+/**
+ * struct cam_ife_csid_hw- csid hw device resources data
+ *
+ * @hw_intf:                  contain the csid hw interface information
+ * @hw_info:                  csid hw device information
+ * @csid_info:                csid hw specific information
+ * @res_type:                 CSID in resource type
+ * @csi2_rx_cfg:              Csi2 rx decoder configuration for csid
+ * @tpg_cfg:                  TPG configuration
+ * @csi2_reserve_cnt:         CSI2 reservation count
+ * @csi2_cfg_cnt:             csi2 configuration count
+ * @tpg_start_cnt:            tpg start count
+ * @ipp_res:                  image pixel path resource
+ * @rdi_res:                  raw dump image path resources
+ * @cid_res:                  cid resources state
+ * @csid_top_complete:        csid top reset completion
+ * @csid_csi2_complete:       csi2 reset completion
+ * @csid_ipp_complete:        ipp reset/halt completion
+ * @csid_rdin_complete:       rdi n reset/halt completion
+ *
+ */
+struct cam_ife_csid_hw {
+	struct cam_hw_intf              *hw_intf;
+	struct cam_hw_info              *hw_info;
+	struct cam_ife_csid_hw_info     *csid_info;
+	uint32_t                         res_type;
+	struct cam_ife_csid_csi2_rx_cfg  csi2_rx_cfg;
+	struct cam_ife_csid_tpg_cfg      tpg_cfg;
+	uint32_t                         csi2_reserve_cnt;
+	uint32_t                         csi2_cfg_cnt;
+	uint32_t                         tpg_start_cnt;
+	struct cam_isp_resource_node     ipp_res;
+	struct cam_isp_resource_node     rdi_res[CAM_IFE_CSID_RDI_MAX];
+	struct cam_isp_resource_node     cid_res[CAM_IFE_CSID_CID_RES_MAX];
+	struct completion                csid_top_complete;
+	struct completion                csid_csi2_complete;
+	struct completion                csid_ipp_complete;
+	struct completion    csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+};
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx);
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw);
+
+#endif /* _CAM_IFE_CSID_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
new file mode 100644
index 0000000..003d83f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_hw_intf.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
+	0, 0, 0, 0};
+
+int cam_ife_csid_probe(struct platform_device *pdev)
+{
+
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_ife_csid_hw_info    *csid_hw_data = NULL;
+	uint32_t                        csid_dev_idx;
+	int                             rc = 0;
+
+	CDBG("%s:%d probe called\n", __func__, __LINE__);
+
+	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+	if (!csid_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	csid_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!csid_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	csid_dev = kzalloc(sizeof(struct cam_ife_csid_hw), GFP_KERNEL);
+	if (!csid_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get ife csid hw index */
+	of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx);
+	/* get ife csid hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("%s:%d No matching table for the IFE CSID HW!\n",
+			__func__, __LINE__);
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	csid_hw_intf->hw_idx = csid_dev_idx;
+	csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID;
+	csid_hw_intf->hw_priv = csid_hw_info;
+
+	csid_hw_info->core_info = csid_dev;
+	csid_hw_info->soc_info.pdev = pdev;
+
+	csid_hw_data = (struct cam_ife_csid_hw_info  *)match_dev->data;
+	/* need to set up the pdev before calling the ife hw probe init */
+	csid_dev->csid_info = csid_hw_data;
+
+	rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, csid_dev);
+	CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+		csid_hw_intf->hw_idx);
+
+	if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_RES_MAX) {
+		cam_ife_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+	} else {
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	return 0;
+
+free_dev:
+	kfree(csid_dev);
+free_hw_info:
+	kfree(csid_hw_info);
+free_hw_intf:
+	kfree(csid_hw_intf);
+err:
+	return rc;
+}
+
+int cam_ife_csid_remove(struct platform_device *pdev)
+{
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+
+	csid_dev = (struct cam_ife_csid_hw *)platform_get_drvdata(pdev);
+	csid_hw_intf = csid_dev->hw_intf;
+	csid_hw_info = csid_dev->hw_info;
+
+	CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+		csid_dev->hw_intf->hw_idx);
+
+	cam_ife_csid_hw_deinit(csid_dev);
+
+	/*release the csid device memory */
+	kfree(csid_dev);
+	kfree(csid_hw_info);
+	kfree(csid_hw_intf);
+	return 0;
+}
+
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_IFE_CSID_HW_RES_MAX &&
+		cam_ife_csid_hw_list[hw_idx]) {
+		*ife_csid_hw = cam_ife_csid_hw_list[hw_idx];
+	} else {
+		*ife_csid_hw = NULL;
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
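
A minimal sketch of how an ISP hw manager could look up the CSID interfaces registered above through cam_ife_csid_hw_init(); the csid_ctx array and example_collect_csid_devices() are hypothetical names used for illustration and are not part of this patch.

/* Illustrative only: collect the cam_hw_intf pointers published by
 * cam_ife_csid_probe() for all probed CSID instances.
 */
static struct cam_hw_intf *csid_ctx[CAM_IFE_CSID_HW_NUM_MAX];

static int example_collect_csid_devices(void)
{
	uint32_t i;
	int rc, num_found = 0;

	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
		rc = cam_ife_csid_hw_init(&csid_ctx[i], i);
		if (rc) {
			/* instance i was not probed; leave the slot NULL */
			continue;
		}
		num_found++;
	}

	return num_found ? 0 : -ENODEV;
}
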
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
new file mode 100644
index 0000000..3b213e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_DEV_H_
+#define _CAM_IFE_CSID_DEV_H_
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data);
+
+int cam_ife_csid_probe(struct platform_device *pdev);
+int cam_ife_csid_remove(struct platform_device *pdev);
+
+#endif /*_CAM_IFE_CSID_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
new file mode 100644
index 0000000..4ed4da5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite170.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+
+#define CAM_CSID_LITE_DRV_NAME                    "csid_lite_170"
+#define CAM_CSID_LITE_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite170_hw_info = {
+	.csid_reg = &cam_ife_csid_lite_170_reg_offset,
+	.hw_dts_version = CAM_CSID_LITE_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid_lite170_dt_match[] = {
+	{
+		.compatible = "qcom,csid-lite170",
+		.data = &cam_ife_csid_lite170_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite170_dt_match);
+
+static struct platform_driver cam_ife_csid_lite170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_LITE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid_lite170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid_lite170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid_lite170_driver);
+}
+
+static void __exit cam_ife_csid_lite170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid_lite170_driver);
+}
+
+module_init(cam_ife_csid_lite170_init_module);
+module_exit(cam_ife_csid_lite170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
new file mode 100644
index 0000000..e857f8b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE170_H_
+#define _CAM_IFE_CSID_LITE170_H_
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_0_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x30,
+	.csid_rdi_irq_mask_addr                   = 0x34,
+	.csid_rdi_irq_clear_addr                  = 0x38,
+	.csid_rdi_irq_set_addr                    = 0x3c,
+	.csid_rdi_cfg0_addr                       = 0x200,
+	.csid_rdi_cfg1_addr                       = 0x204,
+	.csid_rdi_ctrl_addr                       = 0x208,
+	.csid_rdi_frm_drop_pattern_addr           = 0x20c,
+	.csid_rdi_frm_drop_period_addr            = 0x210,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x214,
+	.csid_rdi_irq_subsample_period_addr       = 0x218,
+	.csid_rdi_rpp_hcrop_addr                  = 0x21c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x220,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x224,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x228,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x22c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x230,
+	.csid_rdi_rst_strobes_addr                = 0x240,
+	.csid_rdi_status_addr                     = 0x250,
+	.csid_rdi_misr_val0_addr                  = 0x254,
+	.csid_rdi_misr_val1_addr                  = 0x258,
+	.csid_rdi_misr_val2_addr                  = 0x25c,
+	.csid_rdi_misr_val3_addr                  = 0x260,
+	.csid_rdi_format_measure_cfg0_addr        = 0x270,
+	.csid_rdi_format_measure_cfg1_addr        = 0x274,
+	.csid_rdi_format_measure0_addr            = 0x278,
+	.csid_rdi_format_measure1_addr            = 0x27c,
+	.csid_rdi_format_measure2_addr            = 0x280,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x290,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x294,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x298,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x29c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x2a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x2a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x2a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x2ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x2e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x2e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_1_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_2_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x434,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_3_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+	cam_ife_csid_lite_170_csi2_reg_offset = {
+
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+	cam_ife_csid_lite_170_tpg_reg_offset = {
+
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+
+static struct cam_ife_csid_common_reg_offset
+	cam_csid_lite_170_cmn_reg_offset = {
+
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 4,
+	.no_pix                                       = 0,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_lite_170_reg_offset = {
+	.cmn_reg          = &cam_csid_lite_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_lite_170_csi2_reg_offset,
+	.ipp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_lite_170_rdi_0_reg_offset,
+		&cam_ife_csid_lite_170_rdi_1_reg_offset,
+		&cam_ife_csid_lite_170_rdi_2_reg_offset,
+		&cam_ife_csid_lite_170_rdi_3_reg_offset,
+		},
+	.tpg_reg = &cam_ife_csid_lite_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
new file mode 100644
index 0000000..f07c45e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -0,0 +1,94 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_ife_csid_soc.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	return cam_soc_util_get_dt_properties(soc_info);
+}
+
+static int cam_ife_csid_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler,
+	void *irq_data)
+{
+	return cam_soc_util_request_platform_resource(soc_info,
+		csid_irq_handler, irq_data);
+}
+
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_ife_csid_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	/* Need to see if we want post process the clock list */
+
+	rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	CDBG("%s: mem_base is %pK\n", __func__,
+		soc_info->reg_map[0].mem_base);
+
+	return rc;
+}
+
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("%s: enable platform failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("%s: Disable platform failed\n", __func__);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
new file mode 100644
index 0000000..218e05a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_SOC_H_
+#define _CAM_IFE_CSID_SOC_H_
+
+#include <linux/interrupt.h>
+#include "cam_isp_hw.h"
+
+/**
+ * struct csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage:       csi vdd voltage value
+ *
+ */
+struct csid_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_ife_csid_init_soc_resources()
+ *
+ * @brief:                 csid soc resource initialization; parses the DT
+ *                         properties and requests the platform resources
+ *
+ * @soc_info:              soc info structure pointer
+ * @csid_irq_handler:      irq handler function to be registered
+ * @irq_data:              irq data for the callback function
+ *
+ */
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_enable_soc_resources()
+ *
+ * @brief:                 csid soc resource enable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info);
+
+/**
+ * cam_ife_csid_disable_soc_resources()
+ *
+ * @brief:                 csid soc resource disable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_IFE_CSID_SOC_H_ */
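
A rough usage sketch (not part of this patch) of the expected call order for the SOC helpers declared above; example_csid_soc_lifecycle() and its arguments are hypothetical, and cam_ife_csid_irq is the handler declared in cam_ife_csid_dev.h.

/* Illustrative only: one-time init, then enable before register access,
 * then disable once the hardware is idle again.
 */
static int example_csid_soc_lifecycle(struct cam_hw_soc_info *soc_info,
	void *csid_hw_priv)
{
	int rc;

	/* one-time setup: parse DT, map registers and request the irq */
	rc = cam_ife_csid_init_soc_resources(soc_info, cam_ife_csid_irq,
		csid_hw_priv);
	if (rc)
		return rc;

	/* enable the platform resources before touching CSID registers */
	rc = cam_ife_csid_enable_soc_resources(soc_info);
	if (rc)
		return rc;

	/* ... configure and run the CSID paths here ... */

	/* release the platform resources once the hw is idle again */
	return cam_ife_csid_disable_soc_resources(soc_info);
}
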
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
new file mode 100644
index 0000000..ecc6f0e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_CSID_HW_INTF_H__
+#define __CAM_CSID_HW_INTF_H__
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX IFE CSID instance */
+#define CAM_IFE_CSID_HW_NUM_MAX                        4
+
+
+/**
+ * enum cam_ife_pix_path_res_id - Specify the CSID path resource id
+ */
+enum cam_ife_pix_path_res_id {
+	CAM_IFE_PIX_PATH_RES_RDI_0,
+	CAM_IFE_PIX_PATH_RES_RDI_1,
+	CAM_IFE_PIX_PATH_RES_RDI_2,
+	CAM_IFE_PIX_PATH_RES_RDI_3,
+	CAM_IFE_PIX_PATH_RES_IPP,
+	CAM_IFE_PIX_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_ife_cid_res_id - Specify the CSID CID resource id
+ */
+enum cam_ife_cid_res_id {
+	CAM_IFE_CSID_CID_0,
+	CAM_IFE_CSID_CID_1,
+	CAM_IFE_CSID_CID_2,
+	CAM_IFE_CSID_CID_3,
+	CAM_IFE_CSID_CID_MAX,
+};
+
+
+/**
+ * struct cam_ife_csid_hw_caps - CSID HW capability information
+ * @no_rdis:        number of RDI paths supported by the CSID HW device
+ * @no_pix:         number of pixel paths supported by the CSID HW device
+ * @major_version:  major version
+ * @minor_version:  minor version
+ * @version_incr:   version increment
+ *
+ */
+struct cam_ife_csid_hw_caps {
+	uint32_t      no_rdis;
+	uint32_t      no_pix;
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+
+/**
+ * struct cam_csid_hw_reserve_resource_args - hw reserve arguments
+ * @res_type:   resource type, CID or PATH
+ *              if type is CID, then res_id is not required,
+ *              if type is PATH, then res_id needs to be filled in
+ * @res_id:     resource id to be reserved
+ * @in_port:    input port resource info
+ * @sync_mode:  sync mode, could be master, slave or none
+ * @master_idx: master device index to be configured in the slave path;
+ *              not required for the master path, only the slave needs to
+ *              configure the master index value
+ * @cid:        CID (DT_ID) value for the path, applicable for CSID path
+ *              reserve
+ * @node_res:   reserved resource structure pointer
+ *
+ */
+struct cam_csid_hw_reserve_resource_args {
+	enum cam_isp_resource_type                res_type;
+	uint32_t                                  res_id;
+	struct cam_isp_in_port_info              *in_port;
+	enum cam_isp_hw_sync_mode                 sync_mode;
+	uint32_t                                  master_idx;
+	uint32_t                                  cid;
+	struct cam_isp_resource_node             *node_res;
+};
+
+
+/**
+ *  enum cam_ife_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_ife_csid_halt_cmd {
+	CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+	CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+	CAM_CSID_HALT_IMMEDIATELY,
+	CAM_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_csid_hw_stop_args - stop all resources
+ * @stop_cmd:  applicable only for PATH resources.
+ *             if the stop command is set to halt immediately, the driver
+ *             stops the path immediately and the manager needs to reset
+ *             the path afterwards.
+ *             if the stop command is set to halt at frame boundary, the
+ *             driver programs a halt at the frame boundary and waits for
+ *             the frame boundary
+ * @node_res:  resource pointer array (i.e. CID or CSID path resources)
+ * @num_res:   number of resources to be stopped
+ *
+ */
+struct cam_csid_hw_stop_args {
+	enum cam_ife_csid_halt_cmd                stop_cmd;
+	struct cam_isp_resource_node            **node_res;
+	uint32_t                                  num_res;
+};
+
+/**
+ * enum cam_ife_csid_reset_type - Specify the reset type
+ */
+enum cam_ife_csid_reset_type {
+	CAM_IFE_CSID_RESET_GLOBAL,
+	CAM_IFE_CSID_RESET_PATH,
+	CAM_IFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_csid_reset_cfg_args - csid reset configuration
+ * @reset_type:  global reset or path reset
+ * @node_res:    resource that needs to be reset
+ *
+ */
+struct cam_csid_reset_cfg_args {
+	enum cam_ife_csid_reset_type   reset_type;
+	struct cam_isp_resource_node  *node_res;
+};
+
+/**
+ * struct cam_csid_get_time_stamp_args - time stamp capture arguments
+ * @node_res:        resource to get the time stamp from
+ * @time_stamp_val:  captured time stamp
+ *
+ */
+struct cam_csid_get_time_stamp_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           time_stamp_val;
+};
+
+/**
+ * enum cam_ife_csid_cmd_type - Specify the csid command
+ */
+enum cam_ife_csid_cmd_type {
+	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_IFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_ife_csid_hw_init()
+ *
+ * @brief:               Initialize function for the CSID hardware
+ *
+ * @ife_csid_hw:         CSID hardware instance returned
+ * @hw_idx:              CSID hardware instance id
+ */
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx);
+
+#endif /* __CAM_CSID_HW_INTF_H__ */
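
A minimal sketch of how a hw manager could fill the reserve and stop argument structures documented above for one RDI path; example_fill_rdi0_args() is hypothetical, and the res_type/sync_mode values are passed in as parameters because the cam_isp_hw.h enums are not listed in this patch.

/* Illustrative only: prepare a PATH reserve for RDI0 and a matching
 * halt-at-frame-boundary stop request.
 */
static void example_fill_rdi0_args(
	struct cam_csid_hw_reserve_resource_args *reserve,
	struct cam_csid_hw_stop_args *stop,
	enum cam_isp_resource_type path_res_type,
	enum cam_isp_hw_sync_mode sync_mode,
	struct cam_isp_in_port_info *in_port,
	struct cam_isp_resource_node **reserved_nodes)
{
	/* PATH type reserve: res_id selects the concrete path (RDI0 here) */
	reserve->res_type   = path_res_type;
	reserve->res_id     = CAM_IFE_PIX_PATH_RES_RDI_0;
	reserve->in_port    = in_port;
	reserve->sync_mode  = sync_mode;	/* master, slave or none */
	reserve->master_idx = 0;	/* only meaningful for a slave path */
	reserve->cid        = 0;	/* filled once the CID is known */
	reserve->node_res   = NULL;	/* set by the CSID driver on success */

	/* stop at frame boundary so the path drains cleanly */
	stop->stop_cmd = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
	stop->node_res = reserved_nodes;
	stop->num_res  = 1;
}
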
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 0898414..6739fb0 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -338,6 +338,7 @@
 		"5",
 		"5.1",
 		"5.2",
+		"Unknown",
 		NULL,
 	};
 	static const char * const h264_loop_filter[] = {
diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig
index 9c73960..e8a7960 100644
--- a/drivers/misc/qcom/Kconfig
+++ b/drivers/misc/qcom/Kconfig
@@ -1,6 +1,5 @@
 config MSM_QDSP6V2_CODECS
 	bool "Audio QDSP6V2 APR support"
-	depends on MSM_SMD
 	select SND_SOC_QDSP6V2
 	help
 	  Enable Audio codecs with APR IPC protocol support between
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e802fbd..f8a7555 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -858,7 +858,6 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
 	POWER_SUPPLY_PROP_CHARGER_TEMP,
 	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
@@ -878,6 +877,8 @@
 	POWER_SUPPLY_PROP_DIE_HEALTH,
 	POWER_SUPPLY_PROP_RERUN_AICL,
 	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -907,9 +908,12 @@
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = smblib_get_prop_batt_capacity(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_get_prop_system_temp_level(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		rc = smblib_get_prop_system_temp_level_max(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
 		/* do not query RRADC if charger is not present */
 		rc = smblib_get_prop_usb_present(chg, &pval);
@@ -1004,7 +1008,7 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smblib_set_prop_input_suspend(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_set_prop_system_temp_level(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 7d5a8bd..f4ae415 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1635,6 +1635,13 @@
 	return 0;
 }
 
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->thermal_levels;
+	return 0;
+}
+
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val)
 {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b0d84f0..5409166 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -398,6 +398,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index c45fb0d..b1e6a3b 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -290,6 +290,10 @@
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK	BIT(16)
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN	BIT(16)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK	BIT(13)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN	BIT(13)
 #define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
 #define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
 
@@ -1449,6 +1453,16 @@
 				  ctrl->acd_adj_down_step_size <<
 				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
 		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK,
+				  (ctrl->acd_notwait_for_cl_settled
+				   ? CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK,
+				  (ctrl->acd_adj_avg_fast_update
+				   ? CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
 				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
 				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
 	}
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 8535020..a315e46 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -661,6 +661,10 @@
  * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
  *			target quotient adjustment due to an ACD up
  *			recommendation.
+ * @acd_notwait_for_cl_settled: Boolean which indicates that ACD down
+ *			recommendations do not need to wait for the CPR
+ *			closed-loop to settle.
+ * @acd_adj_avg_fast_update: Boolean which indicates if CPR should issue
+ *			immediate voltage updates on ACD requests.
  * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
  *			feature.
  * @count_mode:		CPR controller count mode
@@ -828,6 +832,8 @@
 	u32			acd_adj_up_step_limit;
 	u32			acd_adj_down_step_size;
 	u32			acd_adj_up_step_size;
+	bool			acd_notwait_for_cl_settled;
+	bool			acd_adj_avg_fast_update;
 	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index f7f0299..cf7c35d 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -2282,6 +2282,13 @@
 				 rc);
 			return rc;
 		}
+
+		ctrl->acd_notwait_for_cl_settled =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-notwait-for-cl-settled");
+		ctrl->acd_adj_avg_fast_update =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-fast-update");
 	}
 
 	rc = of_property_read_u32(ctrl->dev->of_node,
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index af17066..243b2d1 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -154,16 +154,6 @@
 	else
 		fn = NULL;
 
-	/*
-	 * Forcibly set runtime PM status of request queue to "active" to
-	 * make sure we can again get requests from the queue (see also
-	 * blk_pm_peek_request()).
-	 *
-	 * The resume hook will correct runtime PM status of the disk.
-	 */
-	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-		blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
 	if (fn) {
 		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
 
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index c5ba279..b71ce6b 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -649,6 +649,7 @@
 		}
 		drv->dp_size = dp_fw->size;
 		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
 	}
 
 	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index b2cf03c..9fdd63a 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -1,5 +1,3 @@
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o
-obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o
 obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o
 obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o
 obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c
deleted file mode 100644
index 5c296f66..0000000
--- a/drivers/soc/qcom/qdsp6v2/apr_tal.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* Copyright (c) 2010-2011, 2013-2014, 2016-2017 The Linux Foundation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <soc/qcom/smd.h>
-#include <linux/qdsp6v2/apr_tal.h>
-
-static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-};
-
-struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-
-int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int w_len;
-	unsigned long flags;
-
-	spin_lock_irqsave(&apr_ch->w_lock, flags);
-	if (smd_write_avail(apr_ch->ch) < len) {
-		spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-		return -EAGAIN;
-	}
-
-	w_len = smd_write(apr_ch->ch, data, len);
-	spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-
-	pr_debug("apr_tal:w_len = %d\n", w_len);
-
-	if (w_len != len) {
-		pr_err("apr_tal: Error in write\n");
-		return -ENETRESET;
-	}
-	return w_len;
-}
-
-int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int rc = 0, retries = 0;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	do {
-		if (rc == -EAGAIN)
-			udelay(50);
-
-		rc = __apr_tal_write(apr_ch, data, pkt_priv, len);
-	} while (rc == -EAGAIN && retries++ < 300);
-
-	if (rc == -EAGAIN)
-		pr_err("apr_tal: TIMEOUT for write\n");
-
-	return rc;
-}
-
-static void apr_tal_notify(void *priv, unsigned int event)
-{
-	struct apr_svc_ch_dev *apr_ch = priv;
-	int len, r_len, sz;
-	int pkt_cnt = 0;
-	unsigned long flags;
-
-	pr_debug("event = %d\n", event);
-	switch (event) {
-	case SMD_EVENT_DATA:
-		pkt_cnt = 0;
-		spin_lock_irqsave(&apr_ch->lock, flags);
-check_pending:
-		len = smd_read_avail(apr_ch->ch);
-		if (len < 0) {
-			pr_err("apr_tal: Invalid Read Event :%d\n", len);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		sz = smd_cur_packet_size(apr_ch->ch);
-		if (sz < 0) {
-			pr_debug("pkt size is zero\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		if (!len && !sz && !pkt_cnt)
-			goto check_write_avail;
-		if (!len) {
-			pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len);
-		if (len != r_len) {
-			pr_err("apr_tal: Invalid Read\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		pkt_cnt++;
-		pr_debug("%d %d %d\n", len, sz, pkt_cnt);
-		if (apr_ch->func)
-			apr_ch->func(apr_ch->data, r_len, apr_ch->priv);
-		goto check_pending;
-check_write_avail:
-		if (smd_write_avail(apr_ch->ch))
-			wake_up(&apr_ch->wait);
-		spin_unlock_irqrestore(&apr_ch->lock, flags);
-		break;
-	case SMD_EVENT_OPEN:
-		pr_debug("apr_tal: SMD_EVENT_OPEN\n");
-		apr_ch->smd_state = 1;
-		wake_up(&apr_ch->wait);
-		break;
-	case SMD_EVENT_CLOSE:
-		pr_debug("apr_tal: SMD_EVENT_CLOSE\n");
-		break;
-	}
-}
-
-int apr_tal_rx_intents_config(struct apr_svc_ch_dev *apr_ch,
-			int num_of_intents, uint32_t size)
-{
-	/* Rx intents configuration is required for Glink
-	 * but not for SMD. No-op for this function.
-	 */
-	return 0;
-}
-
-struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
-				uint32_t dl, apr_svc_cb_fn func, void *priv)
-{
-	int rc;
-
-	if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
-						(dl >= APR_DL_MAX)) {
-		pr_err("apr_tal: Invalid params\n");
-		return NULL;
-	}
-
-	if (apr_svc_ch[dl][dest][clnt].ch) {
-		pr_err("apr_tal: This channel alreday openend\n");
-		return NULL;
-	}
-
-	mutex_lock(&apr_svc_ch[dl][dest][clnt].m_lock);
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].dest,
-			apr_svc_ch[dl][dest][clnt].dest_state,
-				msecs_to_jiffies(APR_OPEN_TIMEOUT_MS));
-		if (rc == 0) {
-			pr_err("apr_tal:open timeout\n");
-			mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-			return NULL;
-		}
-		pr_debug("apr_tal:Wakeup done\n");
-		apr_svc_ch[dl][dest][clnt].dest_state = 0;
-	}
-	rc = smd_named_open_on_edge(svc_names[dest][clnt], dest,
-			&apr_svc_ch[dl][dest][clnt].ch,
-			&apr_svc_ch[dl][dest][clnt],
-			apr_tal_notify);
-	if (rc < 0) {
-		pr_err("apr_tal: smd_open failed %s\n",
-					svc_names[dest][clnt]);
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		return NULL;
-	}
-	rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].wait,
-		(apr_svc_ch[dl][dest][clnt].smd_state == 1), 5 * HZ);
-	if (rc == 0) {
-		pr_err("apr_tal:TIMEOUT for OPEN event\n");
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		apr_tal_close(&apr_svc_ch[dl][dest][clnt]);
-		return NULL;
-	}
-
-	smd_disable_read_intr(apr_svc_ch[dl][dest][clnt].ch);
-
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		apr_svc_ch[dl][dest][clnt].dest_state = 1;
-		pr_debug("apr_tal:Waiting for apr svc init\n");
-		msleep(200);
-		pr_debug("apr_tal:apr svc init done\n");
-	}
-	apr_svc_ch[dl][dest][clnt].smd_state = 0;
-
-	apr_svc_ch[dl][dest][clnt].func = func;
-	apr_svc_ch[dl][dest][clnt].priv = priv;
-	mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-
-	return &apr_svc_ch[dl][dest][clnt];
-}
-
-int apr_tal_close(struct apr_svc_ch_dev *apr_ch)
-{
-	int r;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	mutex_lock(&apr_ch->m_lock);
-	r = smd_close(apr_ch->ch);
-	apr_ch->ch = NULL;
-	apr_ch->func = NULL;
-	apr_ch->priv = NULL;
-	mutex_unlock(&apr_ch->m_lock);
-	return r;
-}
-
-static int apr_smd_probe(struct platform_device *pdev)
-{
-	int dest;
-	int clnt;
-
-	if (pdev->id == APR_DEST_MODEM) {
-		pr_info("apr_tal:Modem Is Up\n");
-		dest = APR_DEST_MODEM;
-		if (!strcmp(pdev->name, "apr_audio_svc"))
-			clnt = APR_CLIENT_AUDIO;
-		else
-			clnt = APR_CLIENT_VOICE;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else if (pdev->id == APR_DEST_QDSP6) {
-		pr_info("apr_tal:Q6 Is Up\n");
-		dest = APR_DEST_QDSP6;
-		clnt = APR_CLIENT_AUDIO;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else
-		pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id);
-
-	return 0;
-}
-
-static struct platform_driver apr_q6_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_audio_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static struct platform_driver apr_modem_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_voice_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init apr_tal_init(void)
-{
-	int i, j, k;
-
-	for (i = 0; i < APR_DL_MAX; i++)
-		for (j = 0; j < APR_DEST_MAX; j++)
-			for (k = 0; k < APR_CLIENT_MAX; k++) {
-				init_waitqueue_head(&apr_svc_ch[i][j][k].wait);
-				init_waitqueue_head(&apr_svc_ch[i][j][k].dest);
-				spin_lock_init(&apr_svc_ch[i][j][k].lock);
-				spin_lock_init(&apr_svc_ch[i][j][k].w_lock);
-				mutex_init(&apr_svc_ch[i][j][k].m_lock);
-			}
-	platform_driver_register(&apr_q6_driver);
-	platform_driver_register(&apr_modem_driver);
-	return 0;
-}
-device_initcall(apr_tal_init);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index f0be6e9..984241f9 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -35,11 +35,6 @@
 
 #include "thermal_core.h"
 
-#define for_each_tz_sibling(pos, head)                                         \
-	for (pos = list_first_entry((head), struct __thermal_zone, list);\
-		&(pos->list) != (head);                                  \
-		pos = list_next_entry(pos, list))                        \
-
 /***   Private data structures to represent thermal device tree data ***/
 /**
  * struct __thermal_bind_param - a match between trip and cooling device
@@ -436,7 +431,7 @@
 	enum thermal_trip_type type = 0;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		for (trip = 0; trip < data->ntrips; trip++) {
 			of_thermal_get_trip_type(zone, trip, &type);
@@ -499,7 +494,7 @@
 	struct list_head *head;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		thermal_zone_device_update(zone, THERMAL_EVENT_UNSPECIFIED);
 	}
@@ -684,7 +679,7 @@
 void thermal_zone_of_sensor_unregister(struct device *dev,
 				       struct thermal_zone_device *tzd)
 {
-	struct __thermal_zone *tz;
+	struct __thermal_zone *tz, *next;
 	struct thermal_zone_device *pos;
 	struct list_head *head;
 
@@ -698,7 +693,7 @@
 		return;
 
 	head = &tz->senps->first_tz;
-	for_each_tz_sibling(tz, head) {
+	list_for_each_entry_safe(tz, next, head, list) {
 		pos = tz->tzd;
 		mutex_lock(&pos->lock);
 		pos->ops->get_temp = NULL;
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index f8a7945..65dc2df 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -108,6 +108,7 @@
 };
 
 LIST_HEAD(lmh_dcvs_hw_list);
+DEFINE_MUTEX(lmh_dcvs_list_access);
 
 static int limits_dcvs_get_freq_limits(uint32_t cpu, unsigned long *max_freq,
 					 unsigned long *min_freq)
@@ -308,10 +309,14 @@
 {
 	struct limits_dcvs_hw *hw;
 
+	mutex_lock(&lmh_dcvs_list_access);
 	list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
-		if (cpumask_test_cpu(cpu, &hw->core_map))
+		if (cpumask_test_cpu(cpu, &hw->core_map)) {
+			mutex_unlock(&lmh_dcvs_list_access);
 			return hw;
+		}
 	}
+	mutex_unlock(&lmh_dcvs_list_access);
 
 	return NULL;
 }
@@ -388,6 +393,42 @@
 	.floor_limit = lmh_set_min_limit,
 };
 
+static int limits_cpu_online(unsigned int online_cpu)
+{
+	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(online_cpu);
+	unsigned int idx = 0, cpu = 0;
+
+	if (!hw)
+		return 0;
+
+	for_each_cpu(cpu, &hw->core_map) {
+		cpumask_t cpu_mask  = { CPU_BITS_NONE };
+
+		if (cpu != online_cpu) {
+			idx++;
+			continue;
+		} else if (hw->cdev_data[idx].cdev) {
+			return 0;
+		}
+		cpumask_set_cpu(cpu, &cpu_mask);
+		hw->cdev_data[idx].max_freq = U32_MAX;
+		hw->cdev_data[idx].min_freq = 0;
+		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
+						&cpu_mask, &cd_ops);
+		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev)) {
+			pr_err("CPU:%u cooling device register error:%ld\n",
+				cpu, PTR_ERR(hw->cdev_data[idx].cdev));
+			hw->cdev_data[idx].cdev = NULL;
+		} else {
+			pr_debug("CPU:%u cooling device registered\n", cpu);
+		}
+		break;
+
+	}
+
+	return 0;
+}
+
 static int limits_dcvs_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -398,7 +439,7 @@
 	struct device_node *cpu_node, *lmh_node;
 	uint32_t request_reg, clear_reg, min_reg;
 	unsigned long max_freq, min_freq;
-	int cpu, idx;
+	int cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	for_each_possible_cpu(cpu) {
@@ -491,22 +532,6 @@
 	if (IS_ERR_OR_NULL(tzdev))
 		return PTR_ERR(tzdev);
 
-	/* Setup cooling devices to request mitigation states */
-	mutex_init(&hw->access_lock);
-	idx = 0;
-	for_each_cpu(cpu, &hw->core_map) {
-		cpumask_t cpu_mask  = { CPU_BITS_NONE };
-
-		cpumask_set_cpu(cpu, &cpu_mask);
-		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
-						&cpu_mask, &cd_ops);
-		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev))
-			return PTR_ERR(hw->cdev_data[idx].cdev);
-		hw->cdev_data[idx].max_freq = U32_MAX;
-		hw->cdev_data[idx].min_freq = 0;
-		idx++;
-	}
-
 	switch (affinity) {
 	case 0:
 		request_reg = LIMITS_CLUSTER_0_REQ;
@@ -519,33 +544,36 @@
 		min_reg = LIMITS_CLUSTER_1_MIN_FREQ;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unregister_sensor;
 	};
 
+	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+	if (!hw->min_freq_reg) {
+		pr_err("min frequency enable register remap failed\n");
+		ret = -ENOMEM;
+		goto unregister_sensor;
+	}
+
+	mutex_init(&hw->access_lock);
+	init_timer_deferrable(&hw->poll_timer);
+	hw->poll_timer.data = (unsigned long)hw;
+	hw->poll_timer.function = limits_dcvs_poll;
 	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
 	if (!hw->osm_hw_reg) {
 		pr_err("register remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
 	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
 	if (!hw->int_clr_reg) {
 		pr_err("interrupt clear reg remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
-	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
-	if (!hw->min_freq_reg) {
-		pr_err("min frequency enable register remap failed\n");
-		return -ENOMEM;
-	}
-	init_timer_deferrable(&hw->poll_timer);
-	hw->poll_timer.data = (unsigned long)hw;
-	hw->poll_timer.function = limits_dcvs_poll;
 
 	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
 	if (hw->irq_num < 0) {
-		ret = hw->irq_num;
-		pr_err("Error getting IRQ number. err:%d\n", ret);
-		return ret;
+		pr_err("Error getting IRQ number. err:%d\n", hw->irq_num);
+		goto probe_exit;
 	}
 	atomic_set(&hw->is_irq_enabled, 1);
 	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
@@ -553,11 +581,26 @@
 		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
 	if (ret) {
 		pr_err("Error registering for irq. err:%d\n", ret);
-		return ret;
+		ret = 0;
+		goto probe_exit;
 	}
 
+probe_exit:
+	mutex_lock(&lmh_dcvs_list_access);
 	INIT_LIST_HEAD(&hw->list);
 	list_add(&hw->list, &lmh_dcvs_hw_list);
+	mutex_unlock(&lmh_dcvs_list_access);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
+				limits_cpu_online, NULL);
+	if (ret < 0)
+		goto unregister_sensor;
+	ret = 0;
+
+	return ret;
+
+unregister_sensor:
+	thermal_zone_of_sensor_unregister(&pdev->dev, tzdev);
 
 	return ret;
 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6012da3..918f659 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -435,8 +435,8 @@
 #define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
 #define PORT_BESLD(p)(((p) & 0xf) << 10)
 
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT		512
+/* use 128 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT		128
 
 /* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
  * Safe to use with mixed HIRD and BESL systems (host and device) and is used
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index bac5e90..26d1a4c 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -75,8 +75,6 @@
 		int num_of_intents, uint32_t size);
 
 
-#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
-	 defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
 struct apr_svc_ch_dev {
 	void               *handle;
 	spinlock_t         w_lock;
@@ -88,20 +86,5 @@
 	unsigned int       channel_state;
 	bool               if_remote_intent_ready;
 };
-#else
-struct apr_svc_ch_dev {
-	struct smd_channel *ch;
-	spinlock_t         lock;
-	spinlock_t         w_lock;
-	struct mutex       m_lock;
-	apr_svc_cb_fn      func;
-	char               data[APR_MAX_BUF];
-	wait_queue_head_t  wait;
-	void               *priv;
-	uint32_t           smd_state;
-	wait_queue_head_t  dest;
-	uint32_t           dest_state;
-};
-#endif
 
 #endif
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index ac559f2..7161102 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -238,6 +238,9 @@
 	MSM_VIDC_EXTRADATA_FRAME_BITS_INFO = 0x00000010,
 	MSM_VIDC_EXTRADATA_VQZIP_SEI = 0x00000011,
 	MSM_VIDC_EXTRADATA_ROI_QP = 0x00000013,
+#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x00000014,
 #define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI \
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI = 0x00000015,
@@ -252,9 +255,6 @@
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP = 0x0700000F,
 	MSM_VIDC_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
-#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x070000011,
 	MSM_VIDC_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
 	MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
 	MSM_VIDC_EXTRADATA_INDEX = 0x7F100002,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 9c6f471..17224de 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -945,7 +945,7 @@
 	tristate
 
 config SND_SOC_WCD_SPI
-	depends on CONFIG_SPI
+	depends on SPI
 	tristate
 
 config SND_SOC_WL1273
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 4b6fcb0b..c0a32f3 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -8516,6 +8516,7 @@
 	{WCD934X_HPH_L_TEST, 0x01, 0x01},
 	{WCD934X_HPH_R_TEST, 0x01, 0x01},
 	{WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20},
+	{WCD934X_MBHC_NEW_CTL_2, 0x0C, 0x00},
 };
 
 static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {